| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82–53.2k) | int64 (0–721) | string (lengths 91–41.9k) | int64 (0–699) | int64 (0–1) |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
T5Config,
T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    return image
def create_rename_keys(config):
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding'))
    rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding'))
    rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight'))
    rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias'))
    rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight'))
    rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias'))
    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight'))
        rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias'))
        rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight'))
        rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias'))
        rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight'))
        rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight'))
        rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias'))
        rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight'))
        rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias'))
        rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight'))
        rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias'))
    # QFormer
    rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight'))
    rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias'))
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias')
        v_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias')
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f'visual_encoder.blocks.{i}.attn.qkv.bias'] = qkv_bias
def get_blip2_config(model_name):
    image_size = 364 if 'coco' in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if 't5-xl' in model_name:
        text_config = T5Config.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif 't5-xxl' in model_name:
        text_config = T5Config.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif 'vicuna-7b' in model_name:
        text_config = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf', vocab_size=32001).to_dict()
    elif 'vicuna-13b' in model_name:
        text_config = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf', vocab_size=32001).to_dict()
    else:
        raise ValueError('Model name not supported')
    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)
    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    qformer_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased', truncation_side='left')
    qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'})
    if 't5' in model_name:
        tokenizer = T5TokenizerFast.from_pretrained('google/flan-t5-xl', truncation_side='left')
    elif 'vicuna' in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            'huggyllama/llama-7b', truncation_side='left', bos_token='</s>', unk_token='</s>')
        tokenizer.add_special_tokens({'pad_token': '[PAD]'})
    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()
    model_name_to_original = {
        'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'),
        'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'),
        'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'),
        'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'),
    }
    name, model_type = model_name_to_original[model_name]
    # load original model
    print('Loading original model...')
    lavis_device = 'cuda:1' if torch.cuda.is_available() else 'cpu'
    hf_model_device = 'cuda:2' if torch.cuda.is_available() else 'cpu'
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=lavis_device)
    original_model.eval()
    print('Done!')
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('Qformer.bert'):
            key = key.replace('Qformer.bert', 'qformer')
        if 'attention.self' in key:
            key = key.replace('self', 'attention')
        if 'llm_proj' in key:
            key = key.replace('llm_proj', 'language_projection')
        if 't5_proj' in key:
            key = key.replace('t5_proj', 'language_projection')
        if key.startswith('llm_model'):
            key = key.replace('llm_model', 'language_model')
        if key.startswith('t5'):
            key = key.replace('t5', 'language')
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config)
    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)
    image = load_demo_image()
    prompt = 'What is unusual about this image?'
    # create processor
    image_processor = BlipImageProcessor(
        size={'height': image_size, 'width': image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = InstructBlipProcessor(
        image_processor=image_processor, tokenizer=tokenizer, qformer_tokenizer=qformer_tokenizer)
    inputs = processor(images=image, text=prompt, return_tensors='pt').to(hf_model_device)
    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors['eval'](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)
    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if 'vicuna' in model_name:
            original_logits = original_model({'image': original_pixel_values, 'text_input': [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']}).logits
            label_input_ids = tokenizer('\n', return_tensors='pt').input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits
    print('First values of original logits:', original_logits[0, :3, :3])
    print('First values of HF logits:', logits[0, :3, :3])
    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if 'vicuna' in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print('Looks ok!')
    print('Generating with original model...')
    original_outputs = original_model.generate({'image': original_pixel_values, 'prompt': prompt}, num_beams=5)
    # important: we need to cast the weights of the HF model to the appropriate type
    print('Generating with HF model...')
    outputs = hf_model.generate(
        **inputs, do_sample=False, num_beams=5, max_length=256, min_length=1, top_p=0.9,
        repetition_penalty=1.5, length_penalty=1.0, temperature=1)
    if 'vicuna' in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print('Original generation:', original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print('HF generation:', output_text)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        processor.push_to_hub(f'Salesforce/{model_name}')
        hf_model.push_to_hub(f'Salesforce/{model_name}')
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
__SCREAMING_SNAKE_CASE =[
"instructblip-vicuna-7b",
"instructblip-vicuna-13b",
"instructblip-flan-t5-xl",
"instructblip-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="instructblip-flan-t5-xl",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
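# Example invocation (a sketch; the script filename is an assumption based on the
# usual naming of conversion scripts in the transformers repo):
#
#   python convert_instructblip_original_to_pytorch.py \
#       --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl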
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
__SCREAMING_SNAKE_CASE =UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
__SCREAMING_SNAKE_CASE =CLIPImageProcessor()
__SCREAMING_SNAKE_CASE =CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
__SCREAMING_SNAKE_CASE =UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
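# Example invocation (the script filename is illustrative; the script converts the
# txt2img unCLIP checkpoint into an image-variation pipeline at the given path):
#
#   python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variations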
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()

def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4000, return_attention_mask=False, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = 'last'
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertModel(config=config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'langs': token_type_ids,
            'lengths': input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased')
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print('\n'.join(str(row) for row in solutions))
    else:
        print('No solution exists!')
    return solved

def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
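# Minimal usage sketch (not part of the original module): 0 marks an open cell,
# 1 a blocked cell; the solver prints the visited-cell matrix when a path exists.
#
#   maze = [[0, 1, 0],
#           [0, 0, 0],
#           [1, 1, 0]]
#   solve_maze(maze)  # returns True and prints the backtracked path matrix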
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/xprophetnet-large-wiki100-cased': (
        'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
    ),
}
class XLMProphetNetConfig(PretrainedConfig):
    model_type = 'xlm-prophetnet'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'num_encoder_attention_heads',
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = 'gelu',
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
            ' `num_decoder_layers`.'
        )
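# Quick usage sketch (a sanity check, not part of the original file): the
# `num_hidden_layers` property above is the sum of encoder and decoder layers.
#
#   config = XLMProphetNetConfig(num_encoder_layers=12, num_decoder_layers=12)
#   assert config.num_hidden_layers == 24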
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
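# As a quick illustration of the variations cartesian product described above
# (a sketch, independent of the tool's own code below):
#
#   >>> import itertools
#   >>> dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   >>> [" ".join(v).strip() for v in itertools.product(*dims)]
#   ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16', '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']
#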
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """
    A helper class to tee print's output into a file.
    Usage:
    sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, 'a')

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r'^.*\r', '', msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string, wrapped for `max_width` chars."""
    cmd = []
    # deal with critical env vars
    env_keys = ['CUDA_VISIBLE_DEVICES']
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f'{key}={val}')
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split('/')[-1]
    cmd.append(python)
    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ''
    while len(cmd) > 0:
        current_line += f'{cmd.pop(0)} '
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ''
    return '\\\n'.join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r'[\\\n]+', ' ', args.base_cmd)
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r'--output_dir\s+[^\s]+', '', args.base_cmd)
    args.base_cmd += f' --output_dir {output_dir}'
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r'--overwrite_output_dir\s+', '', args.base_cmd)
    args.base_cmd += ' --overwrite_output_dir'
    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, target_metric_key, metric_keys, verbose, output_dir):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )
    result = subprocess.run(cmd, capture_output=True, text=True)
    if verbose:
        print('STDOUT', result.stdout)
        print('STDERR', result.stderr)
    # save the streams
    prefix = variation.replace(' ', '-')
    with open(Path(output_dir) / f'log.{prefix}.stdout.txt', 'w') as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f'log.{prefix}.stderr.txt', 'w') as f:
        f.write(result.stderr)
    if result.returncode != 0:
        if verbose:
            print('failed')
        return {target_metric_key: nan}
    with io.open(f'{output_dir}/all_results.json', 'r', encoding='utf-8') as f:
        metrics = json.load(f)
    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(id, cmd, variation, longest_variation_len, variation_key, target_metric_key,
                report_metric_keys, repeat_times, output_dir, verbose):
    results = []
    metrics = []
    preamble = f'{id}: {variation:<{longest_variation_len}}'
    outcome = f'{preamble}: '
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, target_metric_key, metric_keys, verbose, output_dir)
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f'\33[2K\r{outcome}'
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f'{outcome} {mean_target}'
        if len(results) > 1:
            results_str += f' {tuple(round(x, 2) for x in results)}'
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device('cuda'))
    return f'''
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = 'variation'
    diff_key = 'diff_%'
    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()
    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis='columns',
        )
    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis='columns')  # reorder cols
    # capitalize
    df = df.rename(str.capitalize, axis='columns')
    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace('_', '<br>'), axis='columns')
    df_console = df.rename(lambda c: c.replace('_', '\n'), axis='columns')
    report = ['', 'Copy between the cut-here-lines and paste as is to github or a forum']
    report += ['----------8<-----------------8<--------']
    report += ['*** Results:', df_github.to_markdown(index=False, floatfmt='.2f')]
    report += ['```']
    report += ['*** Setup:', get_versions()]
    report += ['*** The benchmark command line was:', get_original_command()]
    report += ['```']
    report += ['----------8<-----------------8<--------']
    report += ['*** Results (console):', df_console.to_markdown(index=False, floatfmt='.2f')]
    print('\n\n'.join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--base-cmd', default=None, type=str, required=True, help='Base cmd')
    parser.add_argument(
        '--variations', default=None, type=str, nargs='+', required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'")
    parser.add_argument(
        '--base-variation', default=None, type=str,
        help='Baseline variation to compare to. if None the minimal target value will be used to compare against')
    parser.add_argument(
        '--target-metric-key', default=None, type=str, required=True,
        help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second')
    parser.add_argument(
        '--report-metric-keys', default='', type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'")
    parser.add_argument(
        '--repeat-times', default=1, type=int,
        help='How many times to re-run each variation - an average will be reported')
    parser.add_argument(
        '--output_dir', default='output_benchmark', type=str,
        help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked')
    parser.add_argument(
        '--verbose', action='store_true',
        help='Whether to show the outputs of each run or just the benchmark progress')
    args = parser.parse_args()
    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)
    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r'\|', x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(' '.join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)
    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()
    # capture prints into a log file for convenience
    report_fn = f'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}.txt'
    print(f'\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt')
    print(f'and this script\'s output is also piped into {report_fn}')
    sys.stdout = Tee(report_fn)
    print(f'\n*** Running {len(variations)} benchmarks:')
    print(f'Base command: {" ".join(base_cmd)}')
    variation_key = 'variation'
    results = []
    for id, variation in enumerate(tqdm(variations, desc='Total completion: ', leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1, cmd, variation, longest_variation_len, variation_key, args.target_metric_key,
                report_metric_keys, args.repeat_times, output_dir, args.verbose))
    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
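# Migration sketch (the checkpoint name is illustrative): prefer the image
# processor class directly over the deprecated feature extractor wrapper:
#
#   from transformers import ChineseCLIPImageProcessor
#   image_processor = ChineseCLIPImageProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")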
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))
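# Quick sanity check (not part of the original module):
#   >>> sigmoid(np.array([0.0]))
#   array([0.5])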
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='scaled_linear',
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
        )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
        tokenizer.model_max_length = 77
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
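# Minimal standalone inference sketch mirroring the slow tests above (assumes a
# CUDA device and network access to the BAAI/AltDiffusion checkpoint):
#
#   import torch
#   from diffusers import AltDiffusionPipeline
#
#   pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
#   pipe = pipe.to("cuda")
#   generator = torch.manual_seed(0)
#   image = pipe("A painting of a squirrel eating a burger", generator=generator).images[0]
#   image.save("squirrel.png")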
| 718
|
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)

    return key
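# Example of the full mapping performed by rename_key on one checkpoint key:
#   "visual_encoder.blocks.0.attn.proj.weight"
#       -> "vision_model.encoder.layers.0.self_attn.projection.weight"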
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)

    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)

    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
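# Example invocation (the script filename is hypothetical; both flags are
# optional and the BLIP checkpoints are downloaded from the hardcoded URLs):
#
#   python convert_blip_original_pytorch_to_hf.py --pytorch_dump_folder_path ./blip-base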
| 396
| 0
|
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
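# Typical entry point exposed by this package (sketch; assumes network access and
# that the "squad" dataset exists on the Hub):
#
#   from datasets import load_dataset
#   ds = load_dataset("squad", split="train")
#   print(ds[0]["question"])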
| 115
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
'''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SEWForCTC''',
'''SEWForSequenceClassification''',
'''SEWModel''',
'''SEWPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
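
# With the lazy module in place, heavy imports resolve only on first attribute
# access (sketch; assumes torch is installed):
#
#   from transformers import SEWConfig, SEWModel
#   model = SEWModel(SEWConfig())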
| 115
| 1
|
'''simple docstring'''
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)
    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)
        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)
accumulate([1.0, 2.0] , [-1.0, 1.0])
accumulate([3.0, -1.0] , [-1.0, -1.0])
accumulate([-2.0, 2.0] , [3.0, -2.0])
self.assertEqual(accumulator.step , 3)
_check_local_values([2.0, 3.0] , [1.0, -2.0])
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2)
accumulator.reset()
self.assertEqual(accumulator.step , 0)
_check_local_values([0.0, 0.0] , [0.0, 0.0])
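# Standalone usage sketch of the accumulator API exercised above: call it with
# per-micro-batch gradient lists, then read the running sums and reset.
#
#   accumulator = GradientAccumulator()
#   for grads in ([tf.constant([1.0, 2.0])], [tf.constant([3.0, 4.0])]):
#       accumulator(grads)
#   summed = accumulator.gradients  # [[4.0, 6.0]]
#   accumulator.reset()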
| 701
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
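# The forward-compatible import recommended by the deprecation message above:
#
#   from diffusers import FlaxStableDiffusionControlNetPipeline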
| 338
| 0
|
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
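# Construction sketch (relies on the defaults above: Swin backbone + DETR decoder):
#
#   config = MaskFormerConfig()                       # default backbone/decoder
#   config = MaskFormerConfig(mask_feature_size=128)  # override the main feature dim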
| 586
|
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
@require_torch
    def test_offline_mode(self):
        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
@require_torch
    def test_offline_mode_no_internet(self):
'''simple docstring'''
lowercase :List[str] = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase :Dict = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase :List[Any] = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowercase :str = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(snake_case__ )
BertModel.from_pretrained(snake_case__ )
BertTokenizer.from_pretrained(snake_case__ )
pipeline(task='''fill-mask''' , model=snake_case__ )
# baseline - just load from_pretrained with normal network
lowercase :List[str] = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase :str = self.get_env()
lowercase :str = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
    def test_offline_mode_sharded_checkpoint(self):
'''simple docstring'''
lowercase :str = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
lowercase :Union[str, Any] = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
lowercase :Optional[int] = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
lowercase :Optional[Any] = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase :Union[str, Any] = self.get_env()
lowercase :str = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
lowercase :Tuple = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase :Any = '''1'''
lowercase :Optional[Any] = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
    def test_offline_mode_pipeline_exception(self):
'''simple docstring'''
lowercase :Dict = '''
from transformers import pipeline
'''
lowercase :Optional[Any] = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
lowercase :Dict = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
lowercase :Tuple = self.get_env()
lowercase :Optional[Any] = '''1'''
lowercase :Optional[int] = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
lowercase :str = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
    def test_offline_model_dynamic_model(self):
'''simple docstring'''
lowercase :List[Any] = '''
from transformers import AutoModel
'''
lowercase :Union[str, Any] = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
lowercase :Union[str, Any] = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase :List[str] = self.get_env()
lowercase :Optional[int] = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase :List[Any] = '''1'''
lowercase :Tuple = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
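# The environment toggle exercised throughout these tests (sketch):
#
#   TRANSFORMERS_OFFLINE=1 python my_script.py   # resolve everything from the local cache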
| 677
| 0
|
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")
        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )
    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
def _a (self , __a="./" ):
'''simple docstring'''
lowerCamelCase = self.eval_dataset
lowerCamelCase = self.get_eval_dataloader(__a )
lowerCamelCase = next(iter(__a ) )
# saving device - to make it consistent
lowerCamelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
# convert to tuple
lowerCamelCase = tuple(v.to(__a ) for k, v in batch.items() )
logger.info("Converting model to be onnx compatible" )
from pytorch_quantization.nn import TensorQuantizer
lowerCamelCase = True
lowerCamelCase = self.model.to(__a )
model.eval()
model.float()
lowerCamelCase = model.module if hasattr(__a , "module" ) else model
quant_trainer.configure_model(__a , self.quant_trainer_args )
lowerCamelCase = os.path.join(__a , "model.onnx" )
logger.info(F"""exporting model to {output_model_file}""" )
lowerCamelCase = {0: "batch_size", 1: "seq_len"}
torch.onnx.export(
__a , __a , __a , export_params=__a , opset_version=13 , do_constant_folding=__a , input_names=["input_ids", "attention_mask", "token_type_ids"] , output_names=["output_start_logits", "output_end_logits"] , dynamic_axes={
"input_ids": axes,
"attention_mask": axes,
"token_type_ids": axes,
"output_start_logits": axes,
"output_end_logits": axes,
} , verbose=__a , )
logger.info("onnx export finished" )
| 484
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
                 projection_dim=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
# compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
0.0323_6253,
0.1275_3335,
0.1681_8509,
0.0027_9786,
0.389_6933,
0.2426_4945,
0.217_8971,
-0.0233_5227,
-0.0848_1959,
-0.1432_4117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
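# Encoding sketch using the same checkpoint as the integration test above
# (assumes network access; the tokenizer class pairs with the question encoder):
#
#   from transformers import DPRQuestionEncoderTokenizer
#   tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   embedding = model(**tokenizer("hello, is my dog cute?", return_tensors="tf"))[0]  # shape (1, 768)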
| 484
| 1
|
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
'''simple docstring'''
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # UnCLIP does not rescale model inputs; kept for interchangeability with other schedulers.
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timesteps have the same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
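# Denoising-loop sketch showing how the pieces above fit together (`model` is an
# assumed stand-in for a trained epsilon-predicting network):
#
#   scheduler = UnCLIPScheduler()
#   scheduler.set_timesteps(25)
#   for t in scheduler.timesteps:
#       model_output = model(sample, t)
#       sample = scheduler.step(model_output, t, sample).prev_sample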
| 460
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue backed by a doubly linked list of fixed capacity."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            current_node.prev = previous_node
            previous_node.next = current_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None
    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None

        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data
    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
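    # Hedged usage demo of the structures above (runs only on direct execution):
    queue = CircularQueueLinkedList(initial_capacity=3)
    queue.enqueue("a")
    queue.enqueue("b")
    print(queue.first())    # -> a
    print(queue.dequeue())  # -> a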
| 460
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>",
                 eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
                 mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,
        )
lowercase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , a_ ) != add_prefix_space:
lowercase : str = getattr(a_ , pre_tok_state.pop("type" ) )
lowercase : Optional[Any] = add_prefix_space
lowercase : Dict = pre_tok_class(**a_ )
lowercase : Dict = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase : List[Any] = "post_processor"
lowercase : int = getattr(self.backend_tokenizer , a_ , a_ )
if tokenizer_component_instance:
lowercase : Union[str, Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase : Optional[int] = tuple(state["sep"] )
if "cls" in state:
lowercase : List[str] = tuple(state["cls"] )
lowercase : str = False
if state.get("add_prefix_space" , a_ ) != add_prefix_space:
lowercase : Any = add_prefix_space
lowercase : str = True
if state.get("trim_offsets" , a_ ) != trim_offsets:
lowercase : int = trim_offsets
lowercase : int = True
if changes_to_apply:
lowercase : Any = getattr(a_ , state.pop("type" ) )
lowercase : Optional[int] = component_class(**a_ )
setattr(self.backend_tokenizer , a_ , a_ )
@property
def a__ ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def a__ ( self , a_ ) -> Any:
lowercase : Tuple = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else value
lowercase : Tuple = value
def a__ ( self , *a_ , **a_ ) -> BatchEncoding:
lowercase : Union[str, Any] = kwargs.get("is_split_into_words" , a_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*a_ , **a_ )
def a__ ( self , *a_ , **a_ ) -> BatchEncoding:
lowercase : Optional[int] = kwargs.get("is_split_into_words" , a_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._encode_plus(*a_ , **a_ )
def a__ ( self , a_ , a_ = None ) -> Tuple[str]:
lowercase : Optional[Any] = self._tokenizer.model.save(a_ , name=a_ )
return tuple(a_ )
def a__ ( self , a_ , a_=None ) -> Optional[int]:
lowercase : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a__ ( self , a_ , a_ = None ) -> List[int]:
lowercase : Optional[int] = [self.sep_token_id]
lowercase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
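# Usage sketch (hedged; assumes Hub access to "facebook/bart-base"):
#
#     tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
#     encoded = tokenizer("Hello world!")
#     tokenizer.decode(encoded["input_ids"], skip_special_tokens=True)  # -> "Hello world!"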
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
"""\
name: \"\"
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Dataset Card for X\" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Table of Contents\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Dataset Description\"
allow_empty: false
allow_empty_text: false
subsections:
- name: \"Dataset Summary\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Supported Tasks and Leaderboards\"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
"""
)
CORRECT_DICT = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
lowerCAmelCase : List[str] = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowerCAmelCase : int = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
CORRECT_DICT_FOUR_LEVEL = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Extra Ignored Subsection""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
}
],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
lowerCAmelCase : Optional[Any] = """\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_EMPTY_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
README_NO_YAML = """\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_NO_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
README_INCORRECT_YAML = """\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowerCAmelCase : List[str] = """The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."""
lowerCAmelCase : int = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowerCAmelCase : List[str] = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."""
lowerCAmelCase : Dict = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
"""
lowerCAmelCase : Optional[Any] = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."""
lowerCAmelCase : Tuple = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
"""
lowerCAmelCase : Optional[Any] = """The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."""
lowerCAmelCase : List[str] = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
"""
lowerCAmelCase : Optional[int] = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."""
lowerCAmelCase : str = """\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowerCAmelCase : Optional[Any] = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."""
lowerCAmelCase : Union[str, Any] = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
"""
lowerCAmelCase : List[str] = """The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."""
lowerCAmelCase : Optional[int] = """\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowerCAmelCase : Optional[int] = """The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."""
lowerCAmelCase : Union[str, Any] = """"""
lowerCAmelCase : Optional[int] = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."""
lowerCAmelCase : str = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowerCAmelCase : List[str] = """The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."""
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)


@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
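# Usage sketch: the validation exercised by the tests above can also be run directly,
# using the `example_yaml_structure` spec defined at the top of this file.
#
#     readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
#     readme.validate()  # raises ValueError listing any issues found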
from ..utils import DummyObject, requires_backends


# NOTE: the original class name was lost in obfuscation; the name below is a placeholder.
# The body follows the standard transformers dummy-object template for missing backends.
class TorchAndScipyDummyObject(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


# NOTE: the class name below is restored by inference: the crop_pct / 384-shortest-edge
# resize logic matches transformers' ConvNeXt image processor.
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
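# Usage sketch (hedged): with the default {"shortest_edge": 384} size the processor warps
# inputs straight to 384x384 (no crop); "cat.png" is a stand-in for any local image.
#
#     from PIL import Image
#     processor = ConvNextImageProcessor()
#     batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#     batch["pixel_values"].shape  # -> torch.Size([1, 3, 384, 384])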
'''simple docstring'''
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
'''.split()
        self.run_and_check(train_args)

        eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(eval_args)

        entropy_eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(entropy_eval_args)
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('torch'))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str) -> bool:
    """
    Compare a library version (or an already-parsed `Version`) against `requirement_version`
    using the comparison named by `operation` (e.g. ">=", "<").
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str) -> bool:
    """Compare the installed torch version against `version`, e.g. is_torch_version(">=", "1.12")."""
    return compare_versions(torch_version, operation, version)
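# Usage sketch (hedged): assumes STR_OPERATION_TO_FUNC maps comparison strings such as
# ">=" to functions like operator.ge, as in accelerate's constants module.
#
#     if is_torch_version(">=", "1.12.0"):
#         ...  # take a code path that needs torch >= 1.12
#
#     compare_versions("numpy", ">=", "1.20.0")  # same check for any installed package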
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"


class UperNetConvModule(nn.Module):
    """A conv + batch-norm + ReLU block used throughout the UperNet heads."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output


class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PPM), pooling the top feature map at several scales."""

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs


class UperNetHead(nn.Module):
    """Unified Perceptual Parsing decode head: a PSP module on the top feature map plus an FPN."""

    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output


class UperNetFCNHead(nn.Module):
    """Fully-convolutional auxiliary head applied to one intermediate feature map."""

    def __init__(
        self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ) -> None:
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output


class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value


UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
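# Usage sketch (hedged): a random-tensor forward pass through the model above. Exact
# config fields may differ across library versions; ConvNextConfig is just one valid backbone.
#
#     from transformers import ConvNextConfig, UperNetConfig
#     backbone_config = ConvNextConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
#     model = UperNetForSemanticSegmentation(UperNetConfig(backbone_config=backbone_config))
#     logits = model(pixel_values=torch.randn(1, 3, 512, 512)).logits  # (1, num_labels, 512, 512)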
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
from __future__ import annotations
seive = [True] * 1000001
i = 2
while i * i <= 1000000:
    if seive[i]:
        for j in range(i * i, 1000001, i):
            seive[j] = False
    i += 1


def is_prime(n: int) -> bool:
    """Look up primality of `n` in the precomputed sieve."""
    return seive[n]


def contains_an_even_digit(n: int) -> bool:
    """Return True if any digit of `n` is even (such a number > 2 cannot be a circular prime)."""
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1000000) -> list[int]:
    """Return all circular primes below `limit` (Project Euler problem 35)."""
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    return len(find_circular_primes())
if __name__ == "__main__":
print(f'''{len(find_circular_primes()) = }''')
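    # Sanity check: Project Euler problem 35 gives 55 circular primes below one million.
    assert solution() == 55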
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False  # keep slow-test numerics deterministic


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset


@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files

FILE_CONTENT = "\\n Text data.\n Second line of data."


@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename


@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path


@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        '\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>'
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path
@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def abc_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path


@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path


@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path
@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)

    return data_dir
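# Minimal usage sketch (hypothetical test, not part of the original suite): any of
# the session-scoped fixtures above can be requested by name in a test function.
#
# def test_csv_roundtrip(csv_path):
#     ds = datasets.load_dataset("csv", data_files=csv_path, split="train")
#     assert ds.num_rows == 4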
def lucas_lehmer_test(p: int) -> bool:
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
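# lucas_lehmer_test(p) decides primality of the Mersenne number 2**p - 1 for an odd
# prime p: 2**7 - 1 = 127 is prime (prints True), while 2**11 - 1 = 2047 = 23 * 89
# is composite (prints False).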
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
'''simple docstring'''
from manim import *
class ModelOffloadAnimation(Scene):
    """Animate an input passing through an offloaded model, with layer weights
    moved between CPU and GPU as hooks trigger."""

    def construct(self):
        # NOTE: the direction/color constants (UP, RIGHT, DOWN, LEFT, RED, BLUE,
        # ORANGE) and the class/step names below are reconstructed assumptions;
        # the original identifiers were lost in obfuscation.
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)
        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18, )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint", font_size=18, )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            f"Now watch as an input is passed through the model\nand how the memory is utilized and handled.", font_size=24, )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_1))

        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.", font_size=24, )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
        self.play(
            Write(a), Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs), Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs), Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs), )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2)
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7

                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs), Circumscribe(cpu_left_col_base[i], **circ_kwargs), Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs), Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs), Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs), )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]), MoveToTarget(model_cpu_arr[i + 1]), )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7), MoveToTarget(model_cpu_arr[i + 1], run_time=0.7), )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs), Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs), Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs), )

                self.play(MoveToTarget(model_cpu_arr[i]))

        a = a_c
        a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(a), FadeOut(a_c, run_time=0.5), )

        step_3 = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_3.move_to([2, 2, 0])
        self.play(Write(step_3, run_time=3), MoveToTarget(input))
        self.wait()
'''Graham's law of effusion: the effusion rate of a gas is inversely proportional
to the square root of its molar mass (rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1)).'''
from __future__ import annotations

from math import pow, sqrt


def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
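# Minimal usage sketch (illustrative values, not from the original module): by
# Graham's law, hydrogen (~2.016 g/mol) effuses roughly 3.98x faster than
# oxygen (~31.998 g/mol).
if __name__ == "__main__":
    print(effusion_ratio(2.016, 31.998))  # ~3.984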
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mega'] = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates, )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]])
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ])
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"
        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])
        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64))
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
'''Maximum sum over all non-empty subsequences (elements need not be contiguous).'''
from collections.abc import Sequence


def max_subsequence_sum(nums=None) -> int:
    """Return the maximum sum of a non-empty subsequence of nums."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # keep the larger of: the best subsequence so far, that subsequence
        # extended by num, or num alone
        ans = max(ans, ans + num, num)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
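# Example (verifiable by hand): max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])
# returns 12, the sum of the positive elements 1 + 4 + 2 + 1 + 4 (the chosen
# subsequence need not be contiguous).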
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Salesforce/blip-vqa-base''': '''https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json''',
'''Salesforce/blip-vqa-capfit-large''': (
'''https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'''
),
'''Salesforce/blip-image-captioning-base''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'''
),
'''Salesforce/blip-image-captioning-large''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'''
),
'''Salesforce/blip-itm-base-coco''': '''https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-large-coco''': '''https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-base-flikr''': '''https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json''',
'''Salesforce/blip-itm-large-flikr''': (
'''https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'''
),
}
class BlipTextConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "blip_text_model"

    def __init__(self, vocab_size=30524, hidden_size=768, encoder_hidden_size=768, intermediate_size=3072, projection_dim=768, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=512, hidden_act="gelu", layer_norm_eps=1e-12, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, bos_token_id=30522, eos_token_id=2, pad_token_id=0, sep_token_id=102, is_decoder=True, use_cache=True, **kwargs, ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class BlipVisionConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "blip_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, image_size=384, patch_size=16, hidden_act="gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=1e-10, **kwargs, ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class BlipConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "blip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, image_text_hidden_size=256, **kwargs, ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
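# Minimal usage sketch (default values, illustrative only): compose a BlipConfig
# from explicit sub-configs and round-trip it through to_dict.
if __name__ == "__main__":
    blip_config = BlipConfig.from_text_vision_configs(BlipTextConfig(), BlipVisionConfig())
    assert blip_config.to_dict()["model_type"] == "blip"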
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    """simple docstring"""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
'''simple docstring'''
__author__ = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized arithmetic expression with Dijkstra's
    two-stack algorithm:
      RULE 1: push operands on the operand stack
      RULE 2: push operators on the operator stack
      RULE 3: ignore left parentheses
      RULE 4: on a right parenthesis, pop an operator and two operands, apply
              the operator, and push the result on the operand stack
      RULE 5: the value left on the operand stack is the result
    """
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
__UpperCAmelCase = '''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
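

# Minimal sketch of the Stack interface assumed by the relative import above
# (`from .stack import Stack`): only push/pop/peek are needed. This is an
# illustrative stand-in, not that module's actual implementation.
class MinimalStack:
    def __init__(self) -> None:
        self._items: list = []

    def push(self, item) -> None:
        self._items.append(item)

    def pop(self):
        return self._items.pop()

    def peek(self):
        return self._items[-1]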
import logging
import os
from typing import List, Tuple

import numpy as np
import psutil
import torch
import torch.distributed as dist

from transformers import RagRetriever


logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    """A distributed retriever built on top of ``torch.distributed``.

    Only the main worker loads the index; the other workers send their queries
    to it over a dedicated ``gloo`` process group and receive the retrieved
    document embeddings and ids back.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_as_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_as_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
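

# `_chunk_tensor` is inherited from RagRetriever and not defined in this file.
# A minimal sketch of the contract relied on above -- splitting the gathered
# tensor back into one chunk of `chunk_size` rows per worker -- might look
# like this (an illustration, not the inherited implementation):
def _chunk_tensor_sketch(t: torch.Tensor, chunk_size: int) -> List[torch.Tensor]:
    return [t[i : i + chunk_size] for i in range(0, t.shape[0], chunk_size)]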
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
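

# Illustrative sketch of what `pipeline | "Load Examples" >> beam.Create(...)`
# does in the dummy builders above: Create turns an in-memory list into a
# PCollection that the rest of the Beam pipeline consumes. Requires
# `apache-beam` to be installed.
import apache_beam as beam

with beam.Pipeline(runner="DirectRunner") as demo_pipeline:
    pcoll = demo_pipeline | "Load Examples" >> beam.Create([(0, {"content": "foo"}), (1, {"content": "bar"})])
    pcoll | beam.Map(print)  # prints each (key, example) pair when the pipeline runs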
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning (closest) vector by squared Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 < d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Move the winning vector `j` towards the sample by learning rate alpha."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
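

# Worked example of the winner computation above (a sketch, using the initial
# weights): for the test sample [0, 0, 0, 1],
#   d0 = (0-0.2)^2 + (0-0.6)^2 + (0-0.5)^2 + (1-0.9)^2 = 0.04 + 0.36 + 0.25 + 0.01 = 0.66
#   d1 = (0-0.8)^2 + (0-0.4)^2 + (0-0.7)^2 + (1-0.3)^2 = 0.64 + 0.16 + 0.49 + 0.49 = 1.78
# d0 < d1, so cluster 0 wins before any training.
som_check = SelfOrganizingMap()
assert som_check.get_winner([[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]], [0, 0, 0, 1]) == 0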
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forward_generator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
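

# Illustrative sketch: constructing a small config and inspecting the ONNX
# dynamic axes. The numbers here are arbitrary example values, not defaults.
config = BertConfig(vocab_size=1000, hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
print(config.model_type)  # "bert"

onnx_config = BertOnnxConfig.from_model_config(config)
print(onnx_config.inputs)  # OrderedDict mapping input name -> dynamic axes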
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
"""simple docstring"""
from __future__ import annotations
__snake_case = list[tuple[int, int]]
__snake_case = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__snake_case = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class _lowerCAmelCase :
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> str:
'''simple docstring'''
snake_case : int = pos_x
snake_case : List[str] = pos_y
snake_case : List[Any] = (pos_y, pos_x)
snake_case : Optional[int] = goal_x
snake_case : Dict = goal_y
snake_case : Any = g_cost
snake_case : List[Any] = parent
snake_case : Union[str, Any] = self.calculate_heuristic()
def lowerCamelCase ( self ) -> float:
'''simple docstring'''
snake_case : Optional[Any] = abs(self.pos_x - self.goal_x )
snake_case : Dict = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , UpperCamelCase__ ) -> bool:
'''simple docstring'''
return self.f_cost < other.f_cost
class _lowerCAmelCase :
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
snake_case : int = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , UpperCamelCase__ )
snake_case : List[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , UpperCamelCase__ )
snake_case : Tuple = [self.start]
snake_case : list[Node] = []
snake_case : Dict = False
def lowerCamelCase ( self ) -> Path | None:
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
snake_case : str = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
snake_case : Tuple = True
return self.retrace_path(UpperCamelCase__ )
self.closed_nodes.append(UpperCamelCase__ )
snake_case : Optional[Any] = self.get_successors(UpperCamelCase__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(UpperCamelCase__ )
else:
# retrieve the best current path
snake_case : Dict = self.open_nodes.pop(self.open_nodes.index(UpperCamelCase__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(UpperCamelCase__ )
else:
self.open_nodes.append(UpperCamelCase__ )
if not self.reached:
return [self.start.pos]
return None
def lowerCamelCase ( self , UpperCamelCase__ ) -> list[Node]:
'''simple docstring'''
snake_case : Dict = []
for action in delta:
snake_case : Union[str, Any] = parent.pos_x + action[1]
snake_case : str = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(UpperCamelCase__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
UpperCamelCase__ , UpperCamelCase__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , UpperCamelCase__ , ) )
return successors
def lowerCamelCase ( self , UpperCamelCase__ ) -> Path:
'''simple docstring'''
snake_case : Optional[int] = node
snake_case : int = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
snake_case : Any = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
__snake_case = (0, 0)
__snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
__snake_case = GreedyBestFirst(init, goal)
__snake_case = greedy_bf.search()
if path:
for pos_x, pos_y in path:
__snake_case = 2
for elem in grid:
print(elem)
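

# Quick check of the Manhattan heuristic used above: a node at (x=2, y=3)
# aiming for a goal at (x=6, y=6) has f_cost |2-6| + |3-6| = 7. This is an
# illustrative sketch reusing the Node class defined in this file.
node_check = Node(pos_x=2, pos_y=3, goal_x=6, goal_y=6, g_cost=0, parent=None)
assert node_check.calculate_heuristic() == 7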
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def __lowerCAmelCase ( lowercase : int , lowercase : int , lowercase : int ) -> tuple[complex, complex]:
"""simple docstring"""
if a == 0:
raise ValueError("Coefficient 'a' must not be zero." )
snake_case : Dict = b * b - 4 * a * c
snake_case : Tuple = (-b + sqrt(lowercase )) / (2 * a)
snake_case : Optional[int] = (-b - sqrt(lowercase )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def __lowerCAmelCase ( ) -> List[Any]:
"""simple docstring"""
snake_case ,snake_case : Optional[Any] = quadratic_roots(a=5 , b=6 , c=1 )
print(F'The solutions are: {solutiona} and {solutiona}' )
if __name__ == "__main__":
main()
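

# Worked example (a sketch): x**2 + 1 = 0 has no real roots, so the complex
# results are returned as-is, while x**2 - 1 = 0 collapses to plain floats.
assert quadratic_roots(1, 0, 1) == (1j, -1j)
assert quadratic_roots(1, 0, -1) == (1.0, -1.0)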
def sum_of_digits(n: int) -> int:
    """Iteratively sum the digits of abs(n)."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursively sum the digits of abs(n)."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Sum the digits of abs(n) via its string representation."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three digit-sum implementations."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
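

# Quick equivalence sketch for the three implementations above:
for n_check in (0, 9, 10, 262144, -1234):
    assert sum_of_digits(n_check) == sum_of_digits_recursion(n_check) == sum_of_digits_compact(n_check)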
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
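

# Minimal sketch of the kind of conversion being tested -- an illustration of
# the expected contract, not accelerate's actual `_convert_nargs_to_dict`:
# "--key value" pairs become {key: value} (the real helper also infers types),
# and a flag immediately followed by another flag is rejected.
def convert_nargs_sketch(args: list) -> dict:
    out = {}
    for key, value in zip(args[::2], args[1::2]):
        if not key.startswith("--") or value.startswith("--"):
            raise ValueError(f"Cannot parse argument pair ({key}, {value}).")
        out[key[2:]] = value
    return out


print(convert_nargs_sketch(["--epochs", "3", "--learning_rate", "5e-5"]))
# {'epochs': '3', 'learning_rate': '5e-5'}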
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import numpy as np
import pytest

from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex

from .utils import require_elasticsearch, require_faiss


pytestmark = pytest.mark.integration


@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self):
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value = [(True, None)] * 30
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")


@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)


@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1


@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
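

# Illustrative sketch of the raw faiss behaviour the FaissIndex tests above
# rely on: with an inner-product index over the identity matrix, the nearest
# neighbour of a one-hot query is the matching basis vector.
import faiss
import numpy as np

raw_index = faiss.IndexFlatIP(5)            # inner-product index, dim 5
raw_index.add(np.eye(5, dtype=np.float32))  # 5 one-hot database vectors

one_hot = np.zeros((1, 5), dtype=np.float32)
one_hot[0, 1] = 1
raw_scores, raw_ids = raw_index.search(one_hot, 1)
assert raw_ids[0][0] == 1 and raw_scores[0][0] == 1.0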
import torch
from torch import nn


class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
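

# Worked sketch of the cutoff bookkeeping above. With n_token=10 and
# cutoffs=[2, 5], the constructor produces:
#   self.cutoffs     = [2, 5, 10]   (cutoffs + [n_token])
#   self.cutoff_ends = [0, 2, 5, 10]
#   shortlist_size   = 2            (head vocabulary)
#   n_clusters       = 2            (two tail clusters: ids 2..4 and 5..9)
#   head_size        = 4            (2 shortlist logits + 2 cluster logits)
def cluster_of_token(token_id: int, cutoff_ends: list) -> int:
    """Return which cluster (0 = head shortlist) a token id falls into."""
    for i in range(len(cutoff_ends) - 1):
        if cutoff_ends[i] <= token_id < cutoff_ends[i + 1]:
            return i
    raise ValueError(f"token id {token_id} out of range")


assert cluster_of_token(1, [0, 2, 5, 10]) == 0
assert cluster_of_token(3, [0, 2, 5, 10]) == 1
assert cluster_of_token(9, [0, 2, 5, 10]) == 2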
import argparse
import copy


def generate_neighbours(path):
    """Parse the input file into a dict: node -> list of [neighbour, distance]."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour starting from the first node in the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []

    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """All 2-swap neighbours of a solution, each with its total distance appended."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0

            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
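

# Illustrative sketch of the expected input format (an assumption inferred
# from generate_neighbours: each line is "node_a node_b distance"):
example_tsp_data = """\
a b 20
a c 18
b c 10
"""
# with open("tabu_test_data.txt", "w") as f:
#     f.write(example_tsp_data)
# neighbours = generate_neighbours("tabu_test_data.txt")
# -> {'a': [['b', '20'], ['c', '18']], 'b': [['a', '20'], ['c', '10']], 'c': [['a', '18'], ['b', '10']]}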
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
__A = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : Optional[int] , **UpperCAmelCase_ : List[Any]) ->List[str]:
'''simple docstring'''
super().__init__(**UpperCAmelCase_)
requires_backends(self , "vision")
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
def __call__(self : List[str] , UpperCAmelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCAmelCase_ : List[Any]) ->Tuple:
'''simple docstring'''
return super().__call__(UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any] , **UpperCAmelCase_ : Optional[int]) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[int] ={}
if "candidate_labels" in kwargs:
lowerCamelCase__: Tuple =kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
lowerCamelCase__: Tuple =kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Optional[Any]="This is a photo of {}.") ->str:
'''simple docstring'''
lowerCamelCase__: int =load_image(UpperCAmelCase_)
lowerCamelCase__: Any =self.image_processor(images=[image] , return_tensors=self.framework)
lowerCamelCase__: Any =candidate_labels
        lowerCamelCase__: List[str] =[hypothesis_template.format(x) for x in candidate_labels]
lowerCamelCase__: int =self.tokenizer(UpperCAmelCase_ , return_tensors=self.framework , padding=UpperCAmelCase_)
lowerCamelCase__: str =[text_inputs]
return inputs
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Any) ->Tuple:
'''simple docstring'''
lowerCamelCase__: int =model_inputs.pop("candidate_labels")
lowerCamelCase__: List[str] =model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0] , UserDict):
lowerCamelCase__: List[Any] =text_inputs[0]
else:
# Batching case.
lowerCamelCase__: List[Any] =text_inputs[0][0]
        lowerCamelCase__: List[str] =self.model(**text_inputs , **model_inputs)
lowerCamelCase__: str ={
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_image,
}
return model_outputs
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Union[str, Any]) ->int:
'''simple docstring'''
lowerCamelCase__: List[Any] =model_outputs.pop("candidate_labels")
lowerCamelCase__: Optional[int] =model_outputs["logits"][0]
if self.framework == "pt":
lowerCamelCase__: Optional[Any] =logits.softmax(dim=-1).squeeze(-1)
lowerCamelCase__: Optional[Any] =probs.tolist()
            if not isinstance(scores , list):
lowerCamelCase__: Optional[int] =[scores]
elif self.framework == "tf":
            lowerCamelCase__: List[str] =stable_softmax(logits , axis=-1)
lowerCamelCase__: Optional[int] =probs.numpy().tolist()
else:
raise ValueError(F"""Unsupported framework: {self.framework}""")
lowerCamelCase__: Optional[int] =[
{"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(UpperCAmelCase_ , UpperCAmelCase_) , key=lambda x: -x[0])
]
return result
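# Note: a usage sketch for the pipeline above. The `pipeline()` factory
# resolves the task name, loads a CLIP-style checkpoint and dispatches to this
# class; the checkpoint name and the image path below are assumptions for
# illustration only.
from transformers import pipeline

classifier = pipeline(
    "zero-shot-image-classification",
    model="openai/clip-vit-base-patch32",  # assumed checkpoint; any zero-shot image model works
)
outputs = classifier(
    "photo.png",  # hypothetical local image; URLs and PIL images also work
    candidate_labels=["animals", "vehicles", "buildings"],
    hypothesis_template="This is a photo of {}.",
)
print(outputs)  # [{"score": ..., "label": ...}, ...] sorted by descending score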
| 59
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_A : List[str] ={'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : int =['''DPTFeatureExtractor''']
_A : Tuple =['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Union[str, Any] =[
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
_A : List[str] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
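# Note: thanks to `_LazyModule`, none of the heavy torch/vision imports above
# run until an attribute is first accessed. A depth-estimation sketch once they
# are (the `Intel/dpt-large` checkpoint name and the image path are assumptions):
import torch
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")

image = Image.open("room.jpg").convert("RGB")  # hypothetical local image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    depth = model(**inputs).predicted_depth  # (batch, height, width) relative depth map
print(depth.shape)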
| 705
|
'''simple docstring'''
from statistics import mean
import numpy as np
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> list:
lowerCamelCase__ : Optional[int] = 0
# Number of processes finished
lowerCamelCase__ : Union[str, Any] = 0
    # Tracks which processes have finished.
    # An entry of 0 means the process has not yet run; 1 means it has finished.
lowerCamelCase__ : Tuple = [0] * no_of_process
# List to include calculation results
lowerCamelCase__ : List[str] = [0] * no_of_process
# Sort by arrival time.
    lowerCamelCase__ : Union[str, Any] = [burst_time[i] for i in np.argsort(arrival_time )]
    lowerCamelCase__ : List[Any] = [process_name[i] for i in np.argsort(arrival_time )]
arrival_time.sort()
while no_of_process > finished_process_count:
lowerCamelCase__ : str = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
lowerCamelCase__ : Union[str, Any] = arrival_time[i]
lowerCamelCase__ : Any = 0
# Index showing the location of the process being performed
lowerCamelCase__ : Union[str, Any] = 0
# Saves the current response ratio.
lowerCamelCase__ : Any = 0
        for i in range(0 , no_of_process ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
lowerCamelCase__ : Optional[int] = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
lowerCamelCase__ : int = temp
lowerCamelCase__ : str = i
# Calculate the turn around time
lowerCamelCase__ : Optional[int] = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
lowerCamelCase__ : List[str] = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> list:
lowerCamelCase__ : int = [0] * no_of_process
    for i in range(0 , no_of_process ):
lowerCamelCase__ : Optional[Any] = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
_A : List[str] =5
_A : Optional[Any] =['''A''', '''B''', '''C''', '''D''', '''E''']
_A : Optional[int] =[1, 2, 3, 4, 5]
_A : Dict =[1, 2, 3, 4, 5]
_A : Any =calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
_A : Optional[int] =calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
for i in range(0, no_of_process):
print(
F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
F'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
)
print(F'average waiting time : {mean(waiting_time):.5f}')
print(F'average turn around time : {mean(turn_around_time):.5f}')
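# Note: the scheduler above is Highest Response Ratio Next: whenever the CPU
# frees up, the ready process maximizing (wait + burst) / burst runs next. A
# quick check of the ratio, showing why a short job that has waited wins:
def response_ratio(wait_time, burst_time):
    # grows with waiting time (prevents starvation), normalized by job length
    return (wait_time + burst_time) / burst_time

print(response_ratio(3, 1))  # 4.0  -> a burst-1 job that has waited 3 units
print(response_ratio(3, 4))  # 1.75 -> a burst-4 job with the same wait loses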
| 631
| 0
|
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
SCREAMING_SNAKE_CASE__ = '''hf-internal-testing/tiny-random-bert'''
SCREAMING_SNAKE_CASE__ = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
SCREAMING_SNAKE_CASE__ = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class _UpperCamelCase( unittest.TestCase ):
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a : Tuple = cached_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(SCREAMING_SNAKE_CASE__ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'refs' , 'main' ) ) as f:
__a : List[str] = f.read()
self.assertEqual(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , 'snapshots' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(os.path.isfile(SCREAMING_SNAKE_CASE__ ) )
# File is cached at the same place the second time.
__a : Any = cached_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Using a specific revision to test the full commit hash.
__a : Tuple = cached_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , revision='9b8c223' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , 'snapshots' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ , 'is not a valid model identifier' ):
__a : Union[str, Any] = cached_file('tiny-random-bert' , SCREAMING_SNAKE_CASE__ )
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ , 'is not a valid git identifier' ):
__a : List[str] = cached_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , revision='aaaa' )
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ , 'does not appear to have a file named' ):
__a : Dict = cached_file(SCREAMING_SNAKE_CASE__ , 'conf' )
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ , 'does not appear to have a file named' ):
__a : Any = cached_file(SCREAMING_SNAKE_CASE__ , 'conf' )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'refs' , 'main' ) ) as f:
__a : Any = f.read()
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , '.no_exist' , SCREAMING_SNAKE_CASE__ , 'conf' ) ) )
__a : List[Any] = cached_file(SCREAMING_SNAKE_CASE__ , 'conf' , _raise_exceptions_for_missing_entries=SCREAMING_SNAKE_CASE__ )
self.assertIsNone(SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = cached_file(SCREAMING_SNAKE_CASE__ , 'conf' , local_files_only=SCREAMING_SNAKE_CASE__ , _raise_exceptions_for_missing_entries=SCREAMING_SNAKE_CASE__ )
self.assertIsNone(SCREAMING_SNAKE_CASE__ )
__a : int = mock.Mock()
__a : List[Any] = 5_0_0
__a : Dict = {}
__a : str = HTTPError
__a : Optional[int] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=SCREAMING_SNAKE_CASE__ ) as mock_head:
__a : Optional[int] = cached_file(SCREAMING_SNAKE_CASE__ , 'conf' , _raise_exceptions_for_connection_errors=SCREAMING_SNAKE_CASE__ )
self.assertIsNone(SCREAMING_SNAKE_CASE__ )
        # This checks that we did call the fake head request
mock_head.assert_called()
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
        self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , WEIGHTS_NAME ) )
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , TF2_WEIGHTS_NAME ) )
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , FLAX_WEIGHTS_NAME ) )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ , 'is not a valid model identifier' ):
get_file_from_repo('bert-base-case' , SCREAMING_SNAKE_CASE__ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ , 'is not a valid git identifier' ):
get_file_from_repo('bert-base-cased' , SCREAMING_SNAKE_CASE__ , revision='ahaha' )
__a : Optional[Any] = get_file_from_repo('bert-base-cased' , SCREAMING_SNAKE_CASE__ )
# The name is the cached name which is not very easy to test, so instead we load the content.
__a : Optional[Any] = json.loads(open(SCREAMING_SNAKE_CASE__ , 'r' ).read() )
self.assertEqual(config['hidden_size'] , 7_6_8 )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__a : List[str] = Path(SCREAMING_SNAKE_CASE__ ) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(SCREAMING_SNAKE_CASE__ , 'a.txt' ) , str(SCREAMING_SNAKE_CASE__ ) )
self.assertIsNone(get_file_from_repo(SCREAMING_SNAKE_CASE__ , 'b.txt' ) )
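# Note: outside the tests, the same helpers are the supported way to fetch a
# single file from a Hub repo. A sketch of the behaviors asserted above:
from transformers.utils import cached_file, get_file_from_repo, has_file

config_path = cached_file("bert-base-cased", "config.json")  # downloads or reuses the cache
assert get_file_from_repo("bert-base-cased", "ahah.txt") is None  # returns None instead of raising
print(has_file("bert-base-cased", "config.json"))  # True; HEAD request only, no download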
| 47
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : str ={'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 434
| 0
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
a_ : List[Any] =RobertaTokenizer
a_ : str =RobertaTokenizerFast
a_ : List[str] =True
a_ : Dict ={"""cls_token""": """<s>"""}
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_snake_case : Optional[int] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_snake_case : List[str] = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
_snake_case : List[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_snake_case : Union[str, Any] = {"""unk_token""": """<unk>"""}
_snake_case : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_snake_case : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_UpperCamelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_UpperCamelCase ) )
def UpperCamelCase_ ( self : int , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def UpperCamelCase_ ( self : int , **UpperCamelCase : Tuple ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def UpperCamelCase_ ( self : Any , UpperCamelCase : str ):
'''simple docstring'''
_snake_case : Tuple = """lower newer"""
_snake_case : Dict = """lower newer"""
return input_text, output_text
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
_snake_case : List[Any] = """lower newer"""
_snake_case : str = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_snake_case : Any = tokenizer.tokenize(_UpperCamelCase ) # , add_prefix_space=True)
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
_snake_case : Optional[Any] = tokens + [tokenizer.unk_token]
_snake_case : Dict = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , _UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_UpperCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=_UpperCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : int = self.tokenizer_class.from_pretrained('roberta-base' )
_snake_case : Optional[Any] = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCamelCase )
_snake_case : Optional[int] = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCamelCase )
_snake_case : Dict = tokenizer.encode(
'sequence builders' , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
_snake_case : Union[str, Any] = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
_snake_case : List[str] = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase )
_snake_case : Dict = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : str = self.get_tokenizer()
_snake_case : List[Any] = """Encode this sequence."""
_snake_case : Any = tokenizer.byte_encoder[""" """.encode('utf-8' )[0]]
# Testing encoder arguments
_snake_case : Any = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
_snake_case : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )
_snake_case : List[str] = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
_snake_case : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
_snake_case : Optional[Any] = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
_snake_case : Any = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )
# Testing spaces after special tokens
_snake_case : Optional[Any] = """<mask>"""
tokenizer.add_special_tokens(
{'mask_token': AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase )} ) # mask token has a left space
_snake_case : int = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
_snake_case : str = """Encode <mask> sequence"""
_snake_case : Union[str, Any] = """Encode <mask>sequence"""
_snake_case : str = tokenizer.encode(_UpperCamelCase )
_snake_case : Optional[int] = encoded.index(_UpperCamelCase )
_snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
_snake_case : Any = tokenizer.encode(_UpperCamelCase )
_snake_case : Any = encoded.index(_UpperCamelCase )
_snake_case : Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
_snake_case : Dict = self.tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
_snake_case : str = """A, <mask> AllenNLP sentence."""
_snake_case : int = tokenizer_r.encode_plus(_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_token_type_ids=_UpperCamelCase )
_snake_case : Union[str, Any] = tokenizer_p.encode_plus(_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_token_type_ids=_UpperCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
_snake_case : Any = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_snake_case : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
_UpperCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
_UpperCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
_snake_case : int = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
_snake_case : List[str] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_snake_case : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _UpperCamelCase )
self.assertEqual(post_processor_state['add_prefix_space'] , _UpperCamelCase )
self.assertEqual(post_processor_state['trim_offsets'] , _UpperCamelCase )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : int = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
_snake_case : str = f"""{text_of_1_token} {text_of_1_token}"""
_snake_case : int = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
_snake_case : List[Any] = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ) + 1, len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
_snake_case : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
_snake_case : Optional[Any] = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ) + 1, len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
_snake_case : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
_snake_case : Optional[int] = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ), len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
_snake_case : Optional[int] = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ), len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
_snake_case : Union[str, Any] = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
_snake_case : List[str] = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ) + 1, 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
_snake_case : int = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ), 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
_snake_case : Dict = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
_snake_case : Tuple = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ), 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
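# Note: the from_pretrained variants in the last test walk the
# add_prefix_space / trim_offsets grid; the observable difference is in the
# offset mapping. A sketch of one corner, assuming the roberta-base checkpoint:
from transformers import RobertaTokenizerFast

tok = RobertaTokenizerFast.from_pretrained(
    "roberta-base", add_prefix_space=True, trim_offsets=False
)
enc = tok(" hello hello", return_offsets_mapping=True, add_special_tokens=False)
# with trim_offsets=False each token's span keeps its leading space
print(enc.offset_mapping)  # [(0, 6), (6, 12)] since len("hello") == 5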
| 715
|
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCamelCase_ ( lowerCAmelCase: Tuple , lowerCAmelCase: bool = True , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: bool = False , lowerCAmelCase: float = 1_00 , lowerCAmelCase: float = 0.0_1 , lowerCAmelCase: float = 1 , )-> Any:
_snake_case : int = False
_snake_case : Any = search_prob
_snake_case : Tuple = start_temperate
_snake_case : Any = []
_snake_case : List[str] = 0
_snake_case : Optional[Any] = None
while not search_end:
_snake_case : List[Any] = current_state.score()
if best_state is None or current_score > best_state.score():
_snake_case : Dict = current_state
        scores.append(current_score )
iterations += 1
_snake_case : Optional[int] = None
_snake_case : Union[str, Any] = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # keep sampling until we find a neighbor we can move to (or run out)
            _snake_case : Dict = random.randint(0 , len(neighbors ) - 1 ) # picking a random neighbor
_snake_case : int = neighbors.pop(lowerCAmelCase )
_snake_case : Union[str, Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_snake_case : Union[str, Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_snake_case : Union[str, Any] = picked_neighbor
else:
_snake_case : Optional[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_snake_case : int = picked_neighbor
_snake_case : List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_snake_case : List[str] = True
else:
_snake_case : Union[str, Any] = next_state
if visualization:
from matplotlib import pyplot as plt
        plt.plot(range(iterations ) , scores )
plt.xlabel('Iterations' )
plt.ylabel('Function values' )
plt.show()
return best_state
if __name__ == "__main__":
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: List[Any] )-> List[Any]:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Dict )-> Dict:
return (3 * x**2) - (6 * y)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
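# Note: the heart of the loop above is the Metropolis acceptance rule: a
# worsening move of size `change` (< 0) is still taken with probability
# e**(change / temp), so the walk roams at high temperature and hardens into
# plain hill climbing as it cools. A toy 1-D sketch of just that rule:
import math
import random

def anneal_1d(x=12.0, temp=100.0, rate=0.01, threshold=1e-3):
    f = lambda v: v * v  # minimize f(x) = x**2
    while temp > threshold:
        neighbor = x + random.choice([-1.0, 1.0])
        change = f(x) - f(neighbor)  # > 0 means the neighbor is better
        if change > 0 or random.random() < math.e ** (change / temp):
            x = neighbor  # always accept better moves, sometimes accept worse
        temp -= temp * rate  # same geometric cooling schedule as above
    return x

random.seed(0)
print(anneal_1d())  # settles within one step size of the minimum at 0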
| 669
| 0
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class __A( __lowerCamelCase , __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 1
@register_to_config
def __init__(self , SCREAMING_SNAKE_CASE_ = 10_00 , SCREAMING_SNAKE_CASE_ = None ):
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(SCREAMING_SNAKE_CASE_ )
# standard deviation of the initial noise distribution
UpperCamelCase__ = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
UpperCamelCase__ = 4
# running values
UpperCamelCase__ = []
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
UpperCamelCase__ = num_inference_steps
UpperCamelCase__ = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
UpperCamelCase__ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
            UpperCamelCase__ = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
else:
UpperCamelCase__ = torch.sin(steps * math.pi / 2 ) ** 2
UpperCamelCase__ = (1.0 - self.betas**2) ** 0.5
        UpperCamelCase__ = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
UpperCamelCase__ = timesteps.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = []
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , ):
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
UpperCamelCase__ = (self.timesteps == timestep).nonzero().item()
UpperCamelCase__ = timestep_index + 1
UpperCamelCase__ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(SCREAMING_SNAKE_CASE_ )
if len(self.ets ) == 1:
UpperCamelCase__ = self.ets[-1]
elif len(self.ets ) == 2:
UpperCamelCase__ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
UpperCamelCase__ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
UpperCamelCase__ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
UpperCamelCase__ = self._get_prev_sample(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
return sample
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.alphas[timestep_index]
UpperCamelCase__ = self.betas[timestep_index]
UpperCamelCase__ = self.alphas[prev_timestep_index]
UpperCamelCase__ = self.betas[prev_timestep_index]
UpperCamelCase__ = (sample - sigma * ets) / max(SCREAMING_SNAKE_CASE_ , 1E-8 )
UpperCamelCase__ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__(self ):
return self.config.num_train_timesteps
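# Note: the step() above is a 4th-order linear multistep (Adams-Bashforth)
# update: once four model outputs are buffered, the combination
# (55*e1 - 59*e2 + 37*e3 - 9*e4) / 24 extrapolates the next sample. Driving it
# looks like any diffusers scheduler; the zero tensor below is only a stand-in
# for a real denoising model call:
import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)

sample = torch.randn(1, 3, 32, 32)  # stand-in initial noise latent
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step(model_output, t, sample).prev_sample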
| 513
|
from __future__ import annotations
import time
import numpy as np
lowerCamelCase_ = [8, 5, 9, 7]
lowerCamelCase_ = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
lowerCamelCase_ = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class __A:
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase__ = claim_vector
UpperCamelCase__ = allocated_resources_table
UpperCamelCase__ = maximum_claim_table
def UpperCAmelCase_ (self ):
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def UpperCAmelCase_ (self ):
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def UpperCAmelCase_ (self ):
return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def UpperCAmelCase_ (self ):
        return {self.__need().index(i ): i for i in self.__need()}
def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.__need()
UpperCamelCase__ = self.__allocated_resources_table
UpperCamelCase__ = self.__available_resources()
UpperCamelCase__ = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("""_""" * 50 + """\n""" )
while need_list:
UpperCamelCase__ = False
for each_need in need_list:
UpperCamelCase__ = True
                for index, need in enumerate(each_need ):
if need > available_resources[index]:
UpperCamelCase__ = False
break
if execution:
UpperCamelCase__ = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
UpperCamelCase__ = original_need_index
print(F"Process {process_number + 1} is executing." )
# remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    UpperCamelCase__ = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
print(
"""Updated available resource stack for processes: """
+ """ """.join([str(SCREAMING_SNAKE_CASE_ ) for x in available_resources] ) )
break
if safe:
print("""The process is in a safe state.\n""" )
else:
print("""System in unsafe state. Aborting...\n""" )
break
def UpperCAmelCase_ (self ):
print(""" """ * 9 + """Allocated Resource Table""" )
for item in self.__allocated_resources_table:
print(
F"P{self.__allocated_resources_table.index(SCREAMING_SNAKE_CASE_ ) + 1}"
+ """ """.join(F"{it:>8}" for it in item )
+ """\n""" )
print(""" """ * 9 + """System Resource Table""" )
for item in self.__maximum_claim_table:
print(
F"P{self.__maximum_claim_table.index(SCREAMING_SNAKE_CASE_ ) + 1}"
+ """ """.join(F"{it:>8}" for it in item )
+ """\n""" )
print(
"""Current Usage by Active Processes: """
+ """ """.join(str(SCREAMING_SNAKE_CASE_ ) for x in self.__claim_vector ) )
print(
"""Initial Available Resources: """
+ """ """.join(str(SCREAMING_SNAKE_CASE_ ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
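# Note: stripped of the printing, the safety check above reduces to:
# need = max_claim - allocated; repeatedly retire any process whose full need
# fits in the available vector, reclaiming its allocation; the state is safe
# iff every process can be retired this way. A compact sketch over the same tables:
import numpy as np

def is_safe(claim, alloc, max_claim):
    alloc, max_claim = np.array(alloc), np.array(max_claim)
    need = max_claim - alloc
    available = np.array(claim) - alloc.sum(axis=0)
    pending = list(range(len(alloc)))
    while pending:
        runnable = [p for p in pending if (need[p] <= available).all()]
        if not runnable:
            return False  # no process can finish with what is left -> unsafe
        available += alloc[runnable[0]]  # process finishes, frees its resources
        pending.remove(runnable[0])
    return True

claim = [8, 5, 9, 7]
alloc = [[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]]
maxc = [[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]]
print(is_safe(claim, alloc, maxc))  # True for the tables above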
| 513
| 1
|
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def __lowerCAmelCase ( ) -> List[str]:
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def __lowerCAmelCase ( ) -> Dict:
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def __lowerCAmelCase ( ) -> Any:
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head("""https://huggingface.co""" )
| 702
|
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def __lowerCAmelCase ( __lowerCamelCase : str = "laptop" ) -> DataFrame:
__lowerCAmelCase =f"""https://www.amazon.in/laptop/s?k={product}"""
__lowerCAmelCase ={
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
__lowerCAmelCase =BeautifulSoup(requests.get(__lowerCamelCase , headers=__lowerCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
__lowerCAmelCase =DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
            __lowerCAmelCase =item.h2.text
            __lowerCAmelCase ="""https://www.amazon.in/""" + item.h2.a["""href"""]
__lowerCAmelCase =item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
__lowerCAmelCase =item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
__lowerCAmelCase ="""Not available"""
try:
__lowerCAmelCase =(
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
__lowerCAmelCase =""""""
try:
__lowerCAmelCase =float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 100 )
except ValueError:
__lowerCAmelCase =float("""nan""" )
except AttributeError:
pass
__lowerCAmelCase =[
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
__lowerCAmelCase =""" """
__lowerCAmelCase =""" """
data_frame.index += 1
return data_frame
if __name__ == "__main__":
lowercase_ = '''headphones'''
get_amazon_product_data(product).to_csv(F"Amazon Product Data for {product}.csv")
| 456
| 0
|
import operator as op
def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = []
    _SCREAMING_SNAKE_CASE = lambda x ,y : int(x / y ) # noqa: E731 integer division operation
_SCREAMING_SNAKE_CASE = {
'^': op.pow,
'*': op.mul,
'/': div,
'+': op.add,
'-': op.sub,
} # operators & their respective operation
# print table header
print('Symbol'.center(8 ) ,'Action'.center(12 ) ,'Stack' ,sep=' | ' )
    print('-' * (30 + len(post_fix )) )
for x in post_fix:
if x.isdigit(): # if x in digit
            stack.append(x ) # append x to stack
# output in tabular format
            print(x.rjust(8 ) ,('push(' + x + ')').ljust(12 ) ,','.join(stack ) ,sep=' | ' )
else:
_SCREAMING_SNAKE_CASE = stack.pop() # pop stack
# output in tabular format
            print(''.rjust(8 ) ,('pop(' + b + ')').ljust(12 ) ,','.join(stack ) ,sep=' | ' )
_SCREAMING_SNAKE_CASE = stack.pop() # pop stack
# output in tabular format
            print(''.rjust(8 ) ,('pop(' + a + ')').ljust(12 ) ,','.join(stack ) ,sep=' | ' )
            stack.append(
                str(opr[x](int(a ) ,int(b ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
            print(
                x.rjust(8 ) ,('push(' + a + x + b + ')').ljust(12 ) ,','.join(stack ) ,sep=' | ' ,)
return int(stack[0] )
if __name__ == "__main__":
snake_case : Tuple = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix))
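# Note: a worked example for the evaluator above: on "5 6 9 * +" it pushes 5,
# 6, 9, then '*' pops 9 and 6 and pushes 54, and '+' pops 54 and 5 and pushes
# 59. The pop order matters for non-commutative operators: the second pop is
# the left operand. The same evaluation without the table printing:
import operator as op

def eval_postfix(tokens):
    ops = {"^": op.pow, "*": op.mul, "/": lambda a, b: int(a / b), "+": op.add, "-": op.sub}
    stack = []
    for tok in tokens:
        if tok.isdigit():
            stack.append(int(tok))
        else:
            b = stack.pop()  # right operand comes off the stack first
            a = stack.pop()  # then the left operand
            stack.append(ops[tok](a, b))
    return stack[0]

print(eval_postfix("5 6 9 * +".split()))  # 59
print(eval_postfix("10 2 -".split()))     # 8, i.e. a - b and not b - a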
| 605
|
def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ):
"""simple docstring"""
    def merge(left ,right ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
    if len(collection ) <= 1:
return collection
    _SCREAMING_SNAKE_CASE = len(collection ) // 2
return merge(merge_sort(collection[:mid] ) ,merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case : Any = input('Enter numbers separated by a comma:\n').strip()
snake_case : List[Any] = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
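# Note: restated without the generator indirection, the same algorithm reads as
# below; the merge step interleaves the two sorted halves and is stable:
def merge_sort(collection):
    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    left, right = merge_sort(collection[:mid]), merge_sort(collection[mid:])
    merged = []
    while left and right:
        merged.append(left.pop(0) if left[0] <= right[0] else right.pop(0))
    return merged + left + right

print(merge_sort([5, 2, 4, 6, 1, 3]))  # [1, 2, 3, 4, 5, 6]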
| 605
| 1
|
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
__UpperCAmelCase = logging.getLogger(__name__)
def lowercase__ ( lowerCAmelCase__ : str ) -> Optional[Any]:
'''simple docstring'''
a__ : Dict = git.Repo(search_parent_directories=lowerCAmelCase__ )
a__ : Optional[int] = {
"repo_id": str(lowerCAmelCase__ ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
}
with open(os.path.join(lowerCAmelCase__ , "git_log.json" ) , "w" ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ , indent=4 )
def lowercase__ ( lowerCAmelCase__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
if params.n_gpu <= 0:
a__ : Tuple = 0
a__ : Union[str, Any] = -1
a__ : List[Any] = True
a__ : str = False
return
assert torch.cuda.is_available()
logger.info("Initializing GPUs" )
if params.n_gpu > 1:
assert params.local_rank != -1
a__ : Optional[int] = int(os.environ["WORLD_SIZE"] )
a__ : Optional[int] = int(os.environ["N_GPU_NODE"] )
a__ : Union[str, Any] = int(os.environ["RANK"] )
# number of nodes / node ID
a__ : int = params.world_size // params.n_gpu_per_node
a__ : List[Any] = params.global_rank // params.n_gpu_per_node
a__ : Optional[int] = True
assert params.n_nodes == int(os.environ["N_NODES"] )
assert params.node_id == int(os.environ["NODE_RANK"] )
# local job (single GPU)
else:
assert params.local_rank == -1
a__ : Any = 1
a__ : Tuple = 0
a__ : Tuple = 0
a__ : Tuple = 0
a__ : List[str] = 1
a__ : Dict = 1
a__ : Optional[Any] = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
a__ : List[Any] = params.node_id == 0 and params.local_rank == 0
a__ : Union[str, Any] = params.n_nodes > 1
# summary
a__ : Tuple = F"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
logger.info(PREFIX + "Node ID : %i" % params.node_id )
logger.info(PREFIX + "Local rank : %i" % params.local_rank )
logger.info(PREFIX + "World size : %i" % params.world_size )
logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node )
logger.info(PREFIX + "Master : %s" % str(params.is_master ) )
logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) )
logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) )
logger.info(PREFIX + "Hostname : %s" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("Initializing PyTorch distributed" )
torch.distributed.init_process_group(
init_method="env://" , backend="nccl" , )
def lowercase__ ( lowerCAmelCase__ : List[str] ) -> List[Any]:
'''simple docstring'''
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
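# Note: the function above expects WORLD_SIZE, RANK, N_GPU_NODE, N_NODES and
# NODE_RANK to be exported by the launcher before the script starts (the last
# two are specific to this script). A minimal sketch of the same handshake
# using only the variables the standard torch launcher sets:
import os
import torch

local_rank = int(os.environ.get("LOCAL_RANK", -1))
if local_rank != -1:  # launched via torchrun / torch.distributed
    torch.cuda.set_device(local_rank)
    torch.distributed.init_process_group(backend="nccl", init_method="env://")
    print(f"rank {torch.distributed.get_rank()} of {torch.distributed.get_world_size()}")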
| 700
|
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
__lowerCamelCase : Dict = CLIPTokenizer
__lowerCamelCase : Optional[Any] = CLIPTokenizerFast
__lowerCamelCase : List[Any] = True
__lowerCamelCase : Optional[int] = {}
__lowerCamelCase : List[Any] = False
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
super().setUp()
# fmt: off
a__ : Tuple = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
a__ : str = dict(zip(a_ , range(len(a_ ) ) ) )
a__ : Optional[int] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
a__ : Union[str, Any] = {"unk_token": "<unk>"}
a__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(a_ ) )
def UpperCAmelCase ( self : Optional[Any] , **a_ : Tuple ) -> Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **a_ )
def UpperCAmelCase ( self : Tuple , **a_ : Any ) -> Optional[int]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **a_ )
def UpperCAmelCase ( self : Tuple , a_ : Dict ) -> Tuple:
'''simple docstring'''
a__ : Optional[int] = "lower newer"
a__ : Dict = "lower newer"
return input_text, output_text
def UpperCAmelCase ( self : Any ) -> List[Any]:
'''simple docstring'''
a__ : List[str] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
a__ : Optional[Any] = "lower newer"
a__ : Tuple = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
a__ : Tuple = tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
a__ : List[str] = tokens + [tokenizer.unk_token]
a__ : str = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , a_ )
@require_ftfy
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
a__ : Dict = self.tokenizer_class.from_pretrained(a_ , **a_ )
a__ : Any = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
a__ : Optional[int] = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
a__ : str = tokenizer_s.tokenize(a_ )
a__ : int = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_ , a_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
a__ : Dict = "xa\u0303y" + " " + "x\xe3y"
a__ : Any = tokenizer_s.tokenize(a_ )
a__ : Optional[int] = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_ , a_ )
# Test that the tokenization is identical on unicode of space type
a__ : str = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
a__ : str = tokenizer_s.tokenize(a_ )
a__ : List[Any] = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_ , a_ )
# Test that the tokenization is identical on unicode of line break type
a__ : int = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
a__ : Any = tokenizer_s.tokenize(a_ )
a__ : Dict = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_ , a_ )
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
a__ : Union[str, Any] = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a__ : Union[str, Any] = F"{text_of_1_token} {text_of_1_token}"
a__ : List[str] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , )
a__ : List[Any] = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )) , )
a__ : List[Any] = F" {text}"
a__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , )
a__ : Tuple = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )) , )
def UpperCAmelCase ( self : List[str] ) -> List[str]:
'''simple docstring'''
with self.assertRaises(a_ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
super().test_tokenization_python_rust_equals()
def UpperCAmelCase ( self : Dict ) -> str:
'''simple docstring'''
pass
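# Note: the equivalence these tests assert is what users rely on in practice:
# the ftfy-based slow tokenizer and the Rust-backed fast tokenizer produce the
# same tokens. A sketch, assuming the openai/clip-vit-base-patch32 checkpoint:
from transformers import CLIPTokenizer, CLIPTokenizerFast

slow = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
fast = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
text = "A photo of a cat."
assert slow.tokenize(text) == fast.tokenize(text)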
| 251
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3 # noqa: F401
def _lowerCamelCase ( __lowerCamelCase ) -> int:
'''simple docstring'''
    UpperCAmelCase__ : Optional[int] = boto3.client("""iam""" )
UpperCAmelCase__ : Tuple = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{'''Effect''': '''Allow''', '''Principal''': {'''Service''': '''sagemaker.amazonaws.com'''}, '''Action''': '''sts:AssumeRole'''}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=lowercase__ , AssumeRolePolicyDocument=json.dumps(lowercase__ , indent=2 ) )
UpperCAmelCase__ : str = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{
'''Effect''': '''Allow''',
'''Action''': [
'''sagemaker:*''',
'''ecr:GetDownloadUrlForLayer''',
'''ecr:BatchGetImage''',
'''ecr:BatchCheckLayerAvailability''',
'''ecr:GetAuthorizationToken''',
'''cloudwatch:PutMetricData''',
'''cloudwatch:GetMetricData''',
'''cloudwatch:GetMetricStatistics''',
'''cloudwatch:ListMetrics''',
'''logs:CreateLogGroup''',
'''logs:CreateLogStream''',
'''logs:DescribeLogStreams''',
'''logs:PutLogEvents''',
'''logs:GetLogEvents''',
'''s3:CreateBucket''',
'''s3:ListBucket''',
'''s3:GetBucketLocation''',
'''s3:GetObject''',
'''s3:PutObject''',
],
'''Resource''': '''*''',
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=lowercase__ , PolicyName=F"{role_name}_policy_permission" , PolicyDocument=json.dumps(lowercase__ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F"role {role_name} already exists. Using existing one" )
def _lowerCamelCase ( __lowerCamelCase ) -> Dict:
'''simple docstring'''
    UpperCAmelCase__ : Optional[int] = boto3.client("""iam""" )
return iam_client.get_role(RoleName=lowercase__ )["Role"]["Arn"]
def _lowerCamelCase ( ) -> int:
'''simple docstring'''
UpperCAmelCase__ : Tuple = _ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , lowercase__ , )
UpperCAmelCase__ : str = None
if credentials_configuration == 0:
UpperCAmelCase__ : Union[str, Any] = _ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
UpperCAmelCase__ : Dict = aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
UpperCAmelCase__ : Optional[Any] = _ask_field("""AWS Access Key ID: """ )
UpperCAmelCase__ : Tuple = aws_access_key_id
UpperCAmelCase__ : str = _ask_field("""AWS Secret Access Key: """ )
UpperCAmelCase__ : List[str] = aws_secret_access_key
UpperCAmelCase__ : Tuple = _ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
UpperCAmelCase__ : List[Any] = aws_region
UpperCAmelCase__ : List[str] = _ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , lowercase__ , )
if role_management == 0:
UpperCAmelCase__ : Union[str, Any] = _ask_field("""Enter your IAM role name: """ )
else:
UpperCAmelCase__ : Tuple = '''accelerate_sagemaker_execution_role'''
print(F"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials" )
_create_iam_role_for_sagemaker(lowercase__ )
UpperCAmelCase__ : Dict = _ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowercase__ , error_message="""Please enter yes or no.""" , )
UpperCAmelCase__ : List[str] = None
if is_custom_docker_image:
UpperCAmelCase__ : Dict = _ask_field("""Enter your Docker image: """ , lambda __lowerCamelCase : str(lowercase__ ).lower() )
UpperCAmelCase__ : Any = _ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowercase__ , error_message="""Please enter yes or no.""" , )
UpperCAmelCase__ : Dict = None
if is_sagemaker_inputs_enabled:
UpperCAmelCase__ : Optional[int] = _ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda __lowerCamelCase : str(lowercase__ ).lower() , )
UpperCAmelCase__ : Optional[Any] = _ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowercase__ , error_message="""Please enter yes or no.""" , )
UpperCAmelCase__ : Any = None
if is_sagemaker_metrics_enabled:
UpperCAmelCase__ : Dict = _ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda __lowerCamelCase : str(lowercase__ ).lower() , )
UpperCAmelCase__ : Union[str, Any] = _ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
UpperCAmelCase__ : List[str] = {}
UpperCAmelCase__ : int = _ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=lowercase__ , error_message="""Please enter yes or no.""" , )
if use_dynamo:
UpperCAmelCase__ : Union[str, Any] = '''dynamo_'''
UpperCAmelCase__ : Optional[Any] = _ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
UpperCAmelCase__ : List[str] = _ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowercase__ , error_message="""Please enter yes or no.""" , )
if use_custom_options:
UpperCAmelCase__ : Any = _ask_options(
"""Which mode do you want to use?""" , lowercase__ , lambda __lowerCamelCase : TORCH_DYNAMO_MODES[int(lowercase__ )] , default="""default""" , )
UpperCAmelCase__ : Union[str, Any] = _ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowercase__ , error_message="""Please enter yes or no.""" , )
UpperCAmelCase__ : str = _ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowercase__ , error_message="""Please enter yes or no.""" , )
    UpperCAmelCase__ : Any = '''Which EC2 instance type do you want to use for your training?'''
if distributed_type != SageMakerDistributedType.NO:
UpperCAmelCase__ : List[str] = _ask_options(
lowercase__ , lowercase__ , lambda __lowerCamelCase : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(lowercase__ )] )
else:
        eca_instance_query += " [ml.p3.2xlarge]:"
UpperCAmelCase__ : Union[str, Any] = _ask_field(lowercase__ , lambda __lowerCamelCase : str(lowercase__ ).lower() , default="""ml.p3.2xlarge""" )
UpperCAmelCase__ : int = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
UpperCAmelCase__ : Optional[Any] = _ask_field(
"""How many machines do you want use? [1]: """ , lowercase__ , default=1 , )
UpperCAmelCase__ : List[str] = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=lowercase__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=lowercase__ , use_cpu=lowercase__ , dynamo_config=lowercase__ , eca_instance_type=lowercase__ , profile=lowercase__ , region=lowercase__ , iam_role_name=lowercase__ , mixed_precision=lowercase__ , num_machines=lowercase__ , sagemaker_inputs_file=lowercase__ , sagemaker_metrics_file=lowercase__ , )
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _lowercase ( A__ ):
'''simple docstring'''
def __magic_name__( self :List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''depth_multiplier''' ) )
class _lowercase :
'''simple docstring'''
def __init__( self :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any]=13 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :Dict=0.25 , lowerCAmelCase__ :Optional[int]=8 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Union[str, Any]=1_024 , lowerCAmelCase__ :Any=32 , lowerCAmelCase__ :Tuple="relu6" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Dict=0.02 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=True , lowerCAmelCase__ :int=10 , lowerCAmelCase__ :Union[str, Any]=None , ) -> str:
__SCREAMING_SNAKE_CASE : Any = parent
__SCREAMING_SNAKE_CASE : Dict = batch_size
__SCREAMING_SNAKE_CASE : List[Any] = num_channels
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
__SCREAMING_SNAKE_CASE : Optional[int] = depth_multiplier
__SCREAMING_SNAKE_CASE : Dict = min_depth
__SCREAMING_SNAKE_CASE : List[str] = tf_padding
__SCREAMING_SNAKE_CASE : List[Any] = int(last_hidden_size * depth_multiplier )
__SCREAMING_SNAKE_CASE : List[str] = output_stride
__SCREAMING_SNAKE_CASE : Any = hidden_act
__SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
__SCREAMING_SNAKE_CASE : Optional[int] = num_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
__SCREAMING_SNAKE_CASE : Optional[int] = scope
def __magic_name__( self :List[str] ) -> int:
__SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__( self :Union[str, Any] ) -> Optional[Any]:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = MobileNetVaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = config_and_inputs
__SCREAMING_SNAKE_CASE : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
{'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : Tuple = False
def __magic_name__( self :Any ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaModelTester(self )
__SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def __magic_name__( self :Dict ) -> Optional[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def __magic_name__( self :List[Any] ) -> List[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def __magic_name__( self :Any ) -> Dict:
pass
def __magic_name__( self :Any ) -> List[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Any = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__( self :Any ) -> Tuple:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] ) -> Tuple:
def check_hidden_states_output(lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = outputs.hidden_states
__SCREAMING_SNAKE_CASE : Optional[int] = 26
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : str = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def __magic_name__( self :List[str] ) -> List[Any]:
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.default_image_processor
__SCREAMING_SNAKE_CASE : int = prepare_img()
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE : int = model(**lowerCAmelCase__ )
# verify the logits
__SCREAMING_SNAKE_CASE : Any = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Tuple=True ):
model.train()
lowerCamelCase__ = model(__lowerCAmelCase )
lowerCamelCase__ = F.mse_loss(__lowerCAmelCase , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(__lowerCAmelCase )
def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any=False ):
set_seed(42 )
lowerCamelCase__ = RegressionModel()
lowerCamelCase__ = deepcopy(__lowerCAmelCase )
lowerCamelCase__ = RegressionDataset(length=80 )
lowerCamelCase__ = DataLoader(__lowerCAmelCase , batch_size=16 )
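    # A small regression problem is enough to compare gradient behaviour between the plain model and its accelerator-prepared copy.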
model.to(accelerator.device )
if sched:
lowerCamelCase__ = AdamW(params=model.parameters() , lr=1e-3 )
lowerCamelCase__ = AdamW(params=ddp_model.parameters() , lr=1e-3 )
lowerCamelCase__ = LambdaLR(__lowerCAmelCase , lr_lambda=lambda __lowerCAmelCase : epoch**0.65 )
lowerCamelCase__ = LambdaLR(__lowerCAmelCase , lr_lambda=lambda __lowerCAmelCase : epoch**0.65 )
    # Prepare everything (the model copy, optimizers, schedulers and dataloader) with the accelerator
if sched:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def A__ ( __lowerCAmelCase : Any ):
# Test when on a single CPU or GPU that the context manager does nothing
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = get_training_setup(__lowerCAmelCase )
# Use a single batch
lowerCamelCase__ , lowerCamelCase__ = next(iter(__lowerCAmelCase ) ).values()
for iteration in range(3 ):
        # Gather the distributed inputs and targets for the base model
lowerCamelCase__ , lowerCamelCase__ = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase__ , lowerCamelCase__ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground-truth step on the non-DDP model
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__lowerCAmelCase ):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
# Sync grads
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase__ = ddp_input[torch.randperm(len(__lowerCAmelCase ) )]
def A__ ( __lowerCAmelCase : Tuple ):
# Test on distributed setup that context manager behaves properly
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = get_training_setup(__lowerCAmelCase )
# Use a single batch
lowerCamelCase__ , lowerCamelCase__ = next(iter(__lowerCAmelCase ) ).values()
for iteration in range(3 ):
        # Gather the distributed inputs and targets for the base model
lowerCamelCase__ , lowerCamelCase__ = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase__ , lowerCamelCase__ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground-truth step on the non-DDP model
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__lowerCAmelCase ):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
# Sync grads
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase__ = ddp_input[torch.randperm(len(__lowerCAmelCase ) )]
def A__ ( __lowerCAmelCase : int=False , __lowerCAmelCase : Optional[int]=False ):
lowerCamelCase__ = Accelerator(
split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase , gradient_accumulation_steps=2 )
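    # With gradient_accumulation_steps=2, gradients should only sync every second batch (or on the final batch of the dataloader).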
# Test that context manager behaves properly
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = get_training_setup(__lowerCAmelCase )
for iteration, batch in enumerate(__lowerCAmelCase ):
lowerCamelCase__ , lowerCamelCase__ = batch.values()
        # Gather the distributed inputs and targets for the base model
lowerCamelCase__ , lowerCamelCase__ = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase__ , lowerCamelCase__ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground-truth step on the non-DDP model
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(__lowerCAmelCase ):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        # DDP model and model should only be in sync at the end of each accumulation cycle (or at the end of the dataloader)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(__lowerCAmelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase__ = ddp_input[torch.randperm(len(__lowerCAmelCase ) )]
GradientState._reset_state()
def A__ ( __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=False ):
lowerCamelCase__ = Accelerator(
split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = get_training_setup(__lowerCAmelCase , __lowerCAmelCase )
for iteration, batch in enumerate(__lowerCAmelCase ):
lowerCamelCase__ , lowerCamelCase__ = batch.values()
        # Gather the distributed inputs and targets for the base model
lowerCamelCase__ , lowerCamelCase__ = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase__ , lowerCamelCase__ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground-truth step on the non-DDP model
model.train()
ddp_model.train()
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__lowerCAmelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(__lowerCAmelCase ):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
lowerCamelCase__ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__lowerCAmelCase ))
if accelerator.num_processes > 1:
check_model_parameters(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def A__ ( ):
lowerCamelCase__ = Accelerator()
lowerCamelCase__ = RegressionDataset(length=80 )
lowerCamelCase__ = DataLoader(__lowerCAmelCase , batch_size=16 )
lowerCamelCase__ = RegressionDataset(length=96 )
lowerCamelCase__ = DataLoader(__lowerCAmelCase , batch_size=16 )
lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase )
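    # Only dataloaders that went through accelerator.prepare() register themselves with the shared GradientState.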
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(__lowerCAmelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCAmelCase )
if iteration < len(__lowerCAmelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(__lowerCAmelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCAmelCase )
if batch_num < len(__lowerCAmelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def A__ ( ):
lowerCamelCase__ = Accelerator()
lowerCamelCase__ = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(__lowerCAmelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(__lowerCAmelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(__lowerCAmelCase , __lowerCAmelCase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( __lowerCAmelCase : Any ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
'''simple docstring'''
import argparse
import struct
import unittest
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ):
lowerCamelCase__ = data
# Initialize hash values
lowerCamelCase__ = [
0x6a_09_e6_67,
0xbb_67_ae_85,
0x3c_6e_f3_72,
0xa5_4f_f5_3a,
0x51_0e_52_7f,
0x9b_05_68_8c,
0x1f_83_d9_ab,
0x5b_e0_cd_19,
]
# Initialize round constants
lowerCamelCase__ = [
0x42_8a_2f_98,
0x71_37_44_91,
0xb5_c0_fb_cf,
0xe9_b5_db_a5,
0x39_56_c2_5b,
0x59_f1_11_f1,
0x92_3f_82_a4,
0xab_1c_5e_d5,
0xd8_07_aa_98,
0x12_83_5b_01,
0x24_31_85_be,
0x55_0c_7d_c3,
0x72_be_5d_74,
0x80_de_b1_fe,
0x9b_dc_06_a7,
0xc1_9b_f1_74,
0xe4_9b_69_c1,
0xef_be_47_86,
0x0f_c1_9d_c6,
0x24_0c_a1_cc,
0x2d_e9_2c_6f,
0x4a_74_84_aa,
0x5c_b0_a9_dc,
0x76_f9_88_da,
0x98_3e_51_52,
0xa8_31_c6_6d,
0xb0_03_27_c8,
0xbf_59_7f_c7,
0xc6_e0_0b_f3,
0xd5_a7_91_47,
0x06_ca_63_51,
0x14_29_29_67,
0x27_b7_0a_85,
0x2e_1b_21_38,
0x4d_2c_6d_fc,
0x53_38_0d_13,
0x65_0a_73_54,
0x76_6a_0a_bb,
0x81_c2_c9_2e,
0x92_72_2c_85,
0xa2_bf_e8_a1,
0xa8_1a_66_4b,
0xc2_4b_8b_70,
0xc7_6c_51_a3,
0xd1_92_e8_19,
0xd6_99_06_24,
0xf4_0e_35_85,
0x10_6a_a0_70,
0x19_a4_c1_16,
0x1e_37_6c_08,
0x27_48_77_4c,
0x34_b0_bc_b5,
0x39_1c_0c_b3,
0x4e_d8_aa_4a,
0x5b_9c_ca_4f,
0x68_2e_6f_f3,
0x74_8f_82_ee,
0x78_a5_63_6f,
0x84_c8_78_14,
0x8c_c7_02_08,
0x90_be_ff_fa,
0xa4_50_6c_eb,
0xbe_f9_a3_f7,
0xc6_71_78_f2,
]
lowerCamelCase__ = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def UpperCamelCase_ ( _lowerCAmelCase ):
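        # SHA-256 padding: append 0x80, zero-fill until the length is 8 bytes short of a multiple of 64, then append the message length in bits as a 64-bit big-endian integer.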
lowerCamelCase__ = B"""\x80""" + (B"""\x00""" * (63 - (len(_lowerCAmelCase ) + 8) % 64))
lowerCamelCase__ = struct.pack(""">Q""" ,(len(_lowerCAmelCase ) * 8) )
return data + padding + big_endian_integer
def UpperCamelCase_ ( self ):
# Convert into blocks of 64 bytes
lowerCamelCase__ = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
lowerCamelCase__ = list(struct.unpack(""">16L""" ,_lowerCAmelCase ) )
# add 48 0-ed integers
words += [0] * 48
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.hashes
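            # The eight working variables a..h start from the current intermediate hash for every block.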
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
lowerCamelCase__ = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
lowerCamelCase__ = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
lowerCamelCase__ = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_00_00_00_00
# Compression
lowerCamelCase__ = self.ror(_lowerCAmelCase ,6 ) ^ self.ror(_lowerCAmelCase ,11 ) ^ self.ror(_lowerCAmelCase ,25 )
lowerCamelCase__ = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g)
lowerCamelCase__ = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_00_00_00_00
lowerCamelCase__ = self.ror(_lowerCAmelCase ,2 ) ^ self.ror(_lowerCAmelCase ,13 ) ^ self.ror(_lowerCAmelCase ,22 )
lowerCamelCase__ = (a & b) ^ (a & c) ^ (b & c)
lowerCamelCase__ = (sa + maj) % 0x1_00_00_00_00
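                # Standard SHA-256 state rotation follows: e = d + temp1 and a = temp1 + temp2 (mod 2**32).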
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = (
g,
f,
e,
((d + tempa) % 0x1_00_00_00_00),
c,
b,
a,
((tempa + tempa) % 0x1_00_00_00_00),
)
lowerCamelCase__ = [a, b, c, d, e, f, g, h]
# Modify final values
lowerCamelCase__ = [
((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
for index, element in enumerate(self.hashes )
]
lowerCamelCase__ = """""".join([hex(_lowerCAmelCase )[2:].zfill(8 ) for value in self.hashes] )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
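        # Right-rotate a 32-bit value by the given number of bit positions.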
return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations)
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
import hashlib
lowerCamelCase__ = bytes("""Test String""" ,"""utf-8""" )
self.assertEqual(SHAaaa(_lowerCAmelCase ).hash ,hashlib.shaaaa(_lowerCAmelCase ).hexdigest() )
def A__ ( ):
import doctest
doctest.testmod()
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument(
"""-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
lowerCamelCase__ = parser.parse_args()
lowerCamelCase__ = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""" ) as f:
lowerCamelCase__ = f.read()
else:
lowerCamelCase__ = bytes(__lowerCAmelCase , """utf-8""" )
print(SHAaaa(__lowerCAmelCase ).hash )
if __name__ == "__main__":
main()
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
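# Each scheduler family below is imported only when its backend (PyTorch, Flax, SciPy, torchsde) is installed; otherwise dummy placeholder objects are exposed instead.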
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_ ( _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_: List[str] = CodeGenTokenizer
SCREAMING_SNAKE_CASE_: Optional[Any] = CodeGenTokenizerFast
SCREAMING_SNAKE_CASE_: Any = True
SCREAMING_SNAKE_CASE_: str = {"""add_prefix_space""": True}
SCREAMING_SNAKE_CASE_: Any = False
def _UpperCAmelCase ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
A__ = dict(zip(__a , range(len(__a ) ) ) )
A__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
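        # Toy byte-level BPE fixture; "\u0120" is the GPT-2 marker for a leading space.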
A__ = {'unk_token': '<unk>'}
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__a ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__a ) )
def _UpperCAmelCase ( self , **__a ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__a )
def _UpperCAmelCase ( self , **__a ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def _UpperCAmelCase ( self , __a ):
"""simple docstring"""
A__ = 'lower newer'
A__ = 'lower newer'
return input_text, output_text
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
A__ = 'lower newer'
A__ = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
A__ = tokenizer.tokenize(__a , add_prefix_space=__a )
self.assertListEqual(__a , __a )
A__ = tokens + [tokenizer.unk_token]
A__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def _UpperCAmelCase ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer(add_prefix_space=__a )
A__ = 'lower newer'
# Testing tokenization
A__ = tokenizer.tokenize(__a , add_prefix_space=__a )
A__ = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
A__ = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
A__ = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
A__ = self.get_rust_tokenizer(add_prefix_space=__a )
A__ = tokenizer.encode(__a , add_prefix_space=__a )
A__ = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# Testing the unknown token
A__ = tokens + [rust_tokenizer.unk_token]
A__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a )
def _UpperCAmelCase ( self , *__a , **__a ):
"""simple docstring"""
pass
def _UpperCAmelCase ( self , __a=15 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A__ = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
A__ = 'This is a simple input'
A__ = ['This is a simple input 1', 'This is a simple input 2']
A__ = ('This is a simple input', 'This is a pair')
A__ = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
A__ = 'This is a simple input'
A__ = ['This is a simple input looooooooong', 'This is a simple input']
A__ = ('This is a simple input', 'This is a pair')
A__ = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
A__ = tokenizer.pad_token_id
A__ = tokenizer(__a , padding='max_length' , max_length=30 , return_tensors='np' )
A__ = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' )
A__ = tokenizer(*__a , padding='max_length' , max_length=60 , return_tensors='np' )
A__ = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = '$$$'
A__ = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a )
A__ = 'This is a simple input'
A__ = ['This is a simple input 1', 'This is a simple input 2']
A__ = tokenizer.bos_token_id
A__ = tokenizer(__a )
A__ = tokenizer(__a )
self.assertEqual(out_s.input_ids[0] , __a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
A__ = tokenizer.decode(out_s.input_ids )
A__ = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
A__ = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
A__ = '\nif len_a > len_b: result = a\nelse: result = b'
A__ = tokenizer.encode(__a )
A__ = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
A__ = tokenizer.decode(__a , truncate_before_pattern=__a )
self.assertEqual(__a , __a )
def _UpperCAmelCase ( self ):
"""simple docstring"""
pass
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple =(DDIMParallelScheduler,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] =(("eta", 0.0), ("num_inference_steps", 50))
def _lowerCamelCase ( self : Union[str, Any] , **__A : Optional[Any] ):
__UpperCamelCase = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**__A )
return config
def _lowerCamelCase ( self : str , **__A : List[Any] ):
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config(**__A )
__UpperCamelCase = scheduler_class(**__A )
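        # 10 inference steps with eta=0.0, i.e. fully deterministic DDIM sampling.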
__UpperCamelCase , __UpperCamelCase = 1_0, 0.0
__UpperCamelCase = self.dummy_model()
__UpperCamelCase = self.dummy_sample_deter
scheduler.set_timesteps(__A )
for t in scheduler.timesteps:
__UpperCamelCase = model(__A , __A )
__UpperCamelCase = scheduler.step(__A , __A , __A , __A ).prev_sample
return sample
def _lowerCamelCase ( self : List[Any] ):
for timesteps in [1_0_0, 5_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__A )
def _lowerCamelCase ( self : Any ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__A )
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config(steps_offset=1 )
__UpperCamelCase = scheduler_class(**__A )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1] ) )
def _lowerCamelCase ( self : Tuple ):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__A , beta_end=__A )
def _lowerCamelCase ( self : Dict ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__A )
def _lowerCamelCase ( self : Any ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__A )
def _lowerCamelCase ( self : Union[str, Any] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__A )
def _lowerCamelCase ( self : Tuple ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__A )
def _lowerCamelCase ( self : int ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__A )
def _lowerCamelCase ( self : str ):
self.check_over_configs(thresholding=__A )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__A , prediction_type=__A , sample_max_value=__A , )
def _lowerCamelCase ( self : List[str] ):
for t in [1, 1_0, 4_9]:
self.check_over_forward(time_step=__A )
def _lowerCamelCase ( self : List[str] ):
for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0] ):
self.check_over_forward(time_step=__A , num_inference_steps=__A )
def _lowerCamelCase ( self : Optional[Any] ):
for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__A , eta=__A )
def _lowerCamelCase ( self : Union[str, Any] ):
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**__A )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 , 4_0_0 ) - 0.1_4771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 , 9_6_0 ) - 0.3_2460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 , 4_8_6 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 , 9_9_8 ) - 0.02 ) ) < 1e-5
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**__A )
__UpperCamelCase , __UpperCamelCase = 1_0, 0.0
scheduler.set_timesteps(__A )
__UpperCamelCase = self.dummy_model()
__UpperCamelCase = self.dummy_sample_deter
__UpperCamelCase = self.dummy_sample_deter + 0.1
__UpperCamelCase = self.dummy_sample_deter - 0.1
__UpperCamelCase = samplea.shape[0]
__UpperCamelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
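        # Stack three perturbed copies of the sample so a single batched scheduler step can be checked at different timesteps.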
__UpperCamelCase = torch.arange(__A )[0:3, None].repeat(1 , __A )
__UpperCamelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__UpperCamelCase = scheduler.batch_step_no_noise(__A , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , __A )
__UpperCamelCase = torch.sum(torch.abs(__A ) )
__UpperCamelCase = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 1147.7904 ) < 1e-2
assert abs(result_mean.item() - 0.4982 ) < 1e-3
def _lowerCamelCase ( self : Optional[int] ):
__UpperCamelCase = self.full_loop()
__UpperCamelCase = torch.sum(torch.abs(__A ) )
__UpperCamelCase = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 172.0067 ) < 1e-2
assert abs(result_mean.item() - 0.22_3967 ) < 1e-3
def _lowerCamelCase ( self : int ):
__UpperCamelCase = self.full_loop(prediction_type='v_prediction' )
__UpperCamelCase = torch.sum(torch.abs(__A ) )
__UpperCamelCase = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 52.5302 ) < 1e-2
assert abs(result_mean.item() - 0.0684 ) < 1e-3
def _lowerCamelCase ( self : Union[str, Any] ):
# We specify different beta, so that the first alpha is 0.99
__UpperCamelCase = self.full_loop(set_alpha_to_one=__A , beta_start=0.01 )
__UpperCamelCase = torch.sum(torch.abs(__A ) )
__UpperCamelCase = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 149.8295 ) < 1e-2
assert abs(result_mean.item() - 0.1951 ) < 1e-3
def _lowerCamelCase ( self : Optional[int] ):
# We specify different beta, so that the first alpha is 0.99
__UpperCamelCase = self.full_loop(set_alpha_to_one=__A , beta_start=0.01 )
__UpperCamelCase = torch.sum(torch.abs(__A ) )
__UpperCamelCase = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 149.0784 ) < 1e-2
assert abs(result_mean.item() - 0.1941 ) < 1e-3
'''simple docstring'''
def lowercase__ ( __lowercase : list[int] , __lowercase : list[int] ) -> None:
"""simple docstring"""
__UpperCamelCase = len(__lowercase )
print('The following activities are selected:' )
# The first activity is always selected
__UpperCamelCase = 0
print(__lowercase , end=',' )
# Consider rest of the activities
for j in range(__lowercase ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(__lowercase , end=',' )
__UpperCamelCase = j
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ : Any =[1, 3, 0, 5, 8, 5]
a__ : Dict =[2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
import requests
from bs4 import BeautifulSoup
def stock_price ( symbol : str = "AAPL" ) -> str:
    '''simple docstring'''
    url : str = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup : BeautifulSoup = BeautifulSoup(requests.get(url ).text , """html.parser""" )
    class_ : str = """My(6px) Pos(r) smartphone_Mt(6px)"""
    return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__lowercase : List[str] = ['''text''', '''image''', '''audio''']
def lowercase ( __A : List[str] ) -> List[str]:
'''simple docstring'''
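    # Build one dummy input per declared modality (text / image / audio), recursing into nested type lists.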
snake_case : Optional[int] = []
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(__A , __A ):
inputs.append(create_inputs(__A ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
def lowercase ( __A : List ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = []
for output in outputs:
if isinstance(__A , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(__A , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(__A , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
@is_tool_test
class _A :
'''simple docstring'''
def snake_case_ ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"""inputs""" ) )
self.assertTrue(hasattr(self.tool ,"""outputs""" ) )
snake_case : Dict = self.tool.inputs
for _input in inputs:
if isinstance(_input ,SCREAMING_SNAKE_CASE_ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
snake_case : int = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[str] = create_inputs(self.tool.inputs )
snake_case : str = self.tool(*SCREAMING_SNAKE_CASE_ )
# There is a single output
if len(self.tool.outputs ) == 1:
snake_case : Union[str, Any] = [outputs]
self.assertListEqual(output_types(SCREAMING_SNAKE_CASE_ ) ,self.tool.outputs )
def snake_case_ ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"""description""" ) )
self.assertTrue(hasattr(self.tool ,"""default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[Any] = create_inputs(self.tool.inputs )
snake_case : int = self.tool(*SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Any = [outputs]
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,len(self.tool.outputs ) )
for output, output_type in zip(SCREAMING_SNAKE_CASE_ ,self.tool.outputs ):
snake_case : List[str] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = create_inputs(self.tool.inputs )
snake_case : Any = []
for _input, input_type in zip(SCREAMING_SNAKE_CASE_ ,self.tool.inputs ):
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
snake_case : Tuple = self.tool(*SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Union[str, Any] = [outputs]
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,len(self.tool.outputs ) )
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class _lowerCAmelCase :
def __init__( self , __UpperCAmelCase = "cpu" , __UpperCAmelCase = "openai/clip-vit-large-patch14" ):
lowerCAmelCase__ : Union[str, Any] = device
lowerCAmelCase__ : Optional[int] = CLIPTokenizerFast.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = [0.48145466, 0.4578275, 0.40821073]
lowerCAmelCase__ : Optional[Any] = [0.26862954, 0.26130258, 0.27577711]
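        # Standard CLIP image normalization statistics (per-channel RGB mean/std).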
lowerCAmelCase__ : Optional[Any] = torchvision.transforms.Normalize(self.image_mean , self.image_std )
lowerCAmelCase__ : Dict = torchvision.transforms.Resize(224 )
lowerCAmelCase__ : Any = torchvision.transforms.CenterCrop(224 )
def __magic_name__( self , __UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = self.resize(__UpperCAmelCase )
lowerCAmelCase__ : str = self.center_crop(__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = self.normalize(__UpperCAmelCase )
return images
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
lowerCAmelCase__ : Tuple = self.tokenizer(text=__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = self.preprocess_img(__UpperCAmelCase )
lowerCAmelCase__ : Any = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class _lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase=10 , __UpperCAmelCase=0.01 , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase="image" , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , ):
super().__init__()
lowerCAmelCase__ : str = None
lowerCAmelCase__ : str = device if device else get_device()
if vqgan:
lowerCAmelCase__ : Union[str, Any] = vqgan
else:
lowerCAmelCase__ : Dict = load_vqgan(self.device , conf_path=__UpperCAmelCase , ckpt_path=__UpperCAmelCase )
self.vqgan.eval()
if clip:
lowerCAmelCase__ : Optional[int] = clip
else:
lowerCAmelCase__ : Union[str, Any] = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
lowerCAmelCase__ : Optional[Any] = ProcessorGradientFlow(device=self.device )
lowerCAmelCase__ : Tuple = iterations
lowerCAmelCase__ : Tuple = lr
lowerCAmelCase__ : Optional[int] = log
lowerCAmelCase__ : Dict = make_grid
lowerCAmelCase__ : Optional[Any] = return_val
lowerCAmelCase__ : List[Any] = quantize
lowerCAmelCase__ : List[str] = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        """Build a gif from the intermediate images saved during generation."""
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            # hold the first and last frames longer so the loop reads clearly
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path), target_image_size=256).to(self.device)
        img = preprocess_vqgan(img)
        z, *_ = self.vqgan.encode(img)
        return z

    def _add_vector(self, transform_vector):
        """Add a transform vector to the base latent and decode the result."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)
    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)
        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector
    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))
    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Generate an image from the given prompts, optionally starting from an input image."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path
        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))
        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
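# A minimal usage sketch (the paths below are hypothetical; a local VQGAN config and
# checkpoint compatible with `load_vqgan` are assumed):
#
#   editor = VQGAN_CLIP(
#       iterations=30,
#       vqgan_config="./checkpoints/vqgan.yaml",
#       vqgan_checkpoint="./checkpoints/vqgan.ckpt",
#   )
#   for intermediate in editor.generate(pos_prompts="a smiling face", image_path="./face.png"):
#       pass  # each yielded value is an intermediate PIL image (return_val="image")
#   editor.make_animation(output_path="./edit.gif")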
| 700
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module) -> None:
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img) -> None:
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
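# Quick sanity check of the helpers above (assumes a working PyTorch install):
#
#   device = get_device()          # "cuda", "mps", or "cpu"
#   print(get_timestamp())         # e.g. "14:03:27"
#   layer = torch.nn.Linear(4, 4)
#   freeze_module(layer)           # all parameters now have requires_grad=False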
| 470
| 0
|
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'''feature-extraction''': GPTNeoXModel,
'''question-answering''': GPTNeoXForQuestionAnswering,
'''text-classification''': GPTNeoXForSequenceClassification,
'''text-generation''': GPTNeoXForCausalLM,
'''token-classification''': GPTNeoXForTokenClassification,
'''zero-shot''': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"
            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]
            self.assertEqual(output_str, expected_output)
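# Generation outside the test harness follows the same pattern as the slow test
# above (Hugging Face Hub access is assumed):
#
#   tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
#   model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
#   inputs = tokenizer("My favorite food is", return_tensors="pt")
#   output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
#   print(tokenizer.batch_decode(output_ids)[0])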
| 479
|
from __future__ import annotations
_a : Dict = list[list[int]]
# assigning initial values to the grid
_a : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
_a : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that ``n`` can be placed at (row, column) without repeating in the
    row, the column, or the 3x3 block."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty (zero) cell, or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return the solved grid or None."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid row by row."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
_a : int = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 479
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """
    Custom implementation of GPTNeoAttentionMixin._get_block_length_and_num_blocks to enable the export to ONNX, as
    the original implementation uses Python variables and control flow.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
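# Sketch: `attention_types` is expanded into one attention kind per layer, so the
# check in __init__ passes whenever the counts multiply out to `num_layers`:
#
#   config = GPTNeoConfig(num_layers=4, attention_types=[[["global", "local"], 2]])
#   assert config.attention_layers == ["global", "local", "global", "local"]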
| 675
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, squeeze_factor=2, max_position_embeddings=512, position_buckets=256,
        share_att_key=True, relative_attention=True, pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm", hidden_act="gelu_python", hidden_dropout=0.1,
        activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1,
        initializer_range=0.02, layer_norm_eps=1e-7, feature_layer_norm_eps=1e-5,
        feat_extract_norm="group", feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16,
        apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
        ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False,
        classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
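# Example: the product of the conv strides is the audio-sample-to-frame ratio.
# With the defaults above, 5 * 2**6 = 320 input samples per output frame:
#
#   config = SEWDConfig()
#   assert config.inputs_to_logits_ratio == 320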
| 675
| 1
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 6
|
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests OpenAIGPTTokenizer that uses BERT BasicTokenizer."""

    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpaCy(OpenAIGPTTokenizationTest):
    """Tests OpenAIGPTTokenizer that uses SpaCy and ftfy."""

    pass
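# With the toy vocab/merges written in setUp, "lower" reduces through the BPE
# merges "lo w" and "e r</w>" down to two symbols (this mirrors test_full_tokenizer):
#
#   tokenizer = OpenAIGPTTokenizer(vocab_file, merges_file)
#   tokenizer.tokenize("lower")  # -> ["low", "er</w>"]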
| 580
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
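# Example: multi-query attention is the default for this architecture, and the
# attribute_map exposes the GPT-2-style names under the standard ones:
#
#   config = GPTBigCodeConfig()
#   assert config.multi_query is True
#   assert config.hidden_size == config.n_embd == 768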
| 718
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
"BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BertForMaskedLM",
"BertForMultipleChoice",
"BertForNextSentencePrediction",
"BertForPreTraining",
"BertForQuestionAnswering",
"BertForSequenceClassification",
"BertForTokenClassification",
"BertLayer",
"BertLMHeadModel",
"BertModel",
"BertPreTrainedModel",
"load_tf_weights_in_bert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
"TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBertEmbeddings",
"TFBertForMaskedLM",
"TFBertForMultipleChoice",
"TFBertForNextSentencePrediction",
"TFBertForPreTraining",
"TFBertForQuestionAnswering",
"TFBertForSequenceClassification",
"TFBertForTokenClassification",
"TFBertLMHeadModel",
"TFBertMainLayer",
"TFBertModel",
"TFBertPreTrainedModel",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
"FlaxBertForCausalLM",
"FlaxBertForMaskedLM",
"FlaxBertForMultipleChoice",
"FlaxBertForNextSentencePrediction",
"FlaxBertForPreTraining",
"FlaxBertForQuestionAnswering",
"FlaxBertForSequenceClassification",
"FlaxBertForTokenClassification",
"FlaxBertModel",
"FlaxBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
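# Sketch of what the _LazyModule buys you: submodules are only imported when an
# attribute is first touched, so the heavy frameworks load on demand, e.g.
#
#   from transformers.models import bert
#   model_cls = bert.BertModel  # modeling_bert (and torch) are imported here, not before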
| 364
| 0
|
encode_dict = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
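# Round-trip example (each letter maps to a five-symbol A/B group):
#
#   encode("hello")          # -> "AABBBAABAAABABAABABAABBAB"
#   decode(encode("hello"))  # -> "hello"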
| 80
|
def encrypt(input_string: str, key: int) -> str:
    """Shuffle the characters of ``input_string`` into a zigzag of ``key`` rows."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Recreate the zigzag template and read the message back row by row."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Use the decrypt function by guessing every key from 1 to len(input_string)."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
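# Round-trip example with three rails:
#
#   ciphertext = encrypt("WE ARE DISCOVERED", 3)
#   assert decrypt(ciphertext, 3) == "WE ARE DISCOVERED"
#   assert bruteforce(ciphertext)[3] == "WE ARE DISCOVERED"  # key recovered by guessing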
| 80
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1,
        attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1,
        initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16,
        num_buckets=320, max_bucket_distance=800, do_stable_layer_norm=False,
        apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, num_codevectors_per_group=320,
        num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100,
        codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1,
        ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512, num_ctc_classes=80, pad_token_id=0, bos_token_id=1,
        eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2,
        num_adapter_layers=3, output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
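# Example: with the default conv strides (5, 2, 2, 2, 2, 2, 2), one output frame
# covers 5 * 2**6 = 320 input samples, i.e. 20 ms of 16 kHz audio:
#
#   config = WavLMConfig()
#   assert config.inputs_to_logits_ratio == 320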
| 705
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
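# Example: the four encoder stages are configured with parallel lists, one entry
# per stage:
#
#   config = GLPNConfig()
#   assert len(config.depths) == len(config.hidden_sizes) == config.num_encoder_blocks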
| 628
| 0
|
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]) -> None:
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]
def lowercase__ ( self : List[str] ) -> Matrix:
"""simple docstring"""
__snake_case : List[str] = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(__magic_name__ )
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def lowercase__ ( self : Any ) -> bool:
"""simple docstring"""
return bool(self.determinant() )
def lowercase__ ( self : str , __magic_name__ : int , __magic_name__ : int ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(__magic_name__ ).determinant()
def lowercase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : int ) -> int:
"""simple docstring"""
if (row + column) % 2 == 0:
return self.get_minor(__magic_name__ , __magic_name__ )
return -1 * self.get_minor(__magic_name__ , __magic_name__ )
def lowercase__ ( self : int ) -> Matrix:
"""simple docstring"""
return Matrix(
[
[self.get_minor(__magic_name__ , __magic_name__ ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def lowercase__ ( self : str ) -> Matrix:
"""simple docstring"""
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def lowercase__ ( self : List[str] ) -> Matrix:
"""simple docstring"""
__snake_case : List[str] = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(__magic_name__ )
def lowercase__ ( self : Any ) -> Matrix:
"""simple docstring"""
__snake_case : List[Any] = self.determinant()
if not determinant:
raise TypeError("""Only matrices with a non-zero determinant have an inverse""" )
return self.adjugate() * (1 / determinant)
def __repr__( self : Optional[int] ) -> str:
"""simple docstring"""
return str(self.rows )
def __str__( self : Optional[Any] ) -> str:
"""simple docstring"""
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"""[""" + """. """.join([str(__magic_name__ ) for value in row] ) + """.]"""
for row in self.rows
] )
+ "]"
)
def lowercase__ ( self : Union[str, Any] , __magic_name__ : list[int] , __magic_name__ : int | None = None ) -> None:
"""simple docstring"""
__snake_case : Tuple = TypeError("""Row must be a list containing all ints and/or floats""" )
if not isinstance(__magic_name__ , __magic_name__ ):
raise type_error
for value in row:
if not isinstance(__magic_name__ , (int, float) ):
raise type_error
if len(__magic_name__ ) != self.num_columns:
raise ValueError(
"""Row must be equal in length to the other rows in the matrix""" )
if position is None:
self.rows.append(__magic_name__ )
else:
__snake_case : Optional[Any] = self.rows[0:position] + [row] + self.rows[position:]
def lowercase__ ( self : List[Any] , __magic_name__ : list[int] , __magic_name__ : int | None = None ) -> None:
"""simple docstring"""
__snake_case : Tuple = TypeError(
"""Column must be a list containing all ints and/or floats""" )
if not isinstance(__magic_name__ , __magic_name__ ):
raise type_error
for value in column:
if not isinstance(__magic_name__ , (int, float) ):
raise type_error
if len(__magic_name__ ) != self.num_rows:
raise ValueError(
"""Column must be equal in length to the other columns in the matrix""" )
if position is None:
__snake_case : str = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
__snake_case : List[Any] = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self : Tuple , __magic_name__ : object ) -> bool:
"""simple docstring"""
if not isinstance(__magic_name__ , __magic_name__ ):
return NotImplemented
return self.rows == other.rows
def __ne__( self : List[Any] , __magic_name__ : object ) -> bool:
"""simple docstring"""
return not self == other
def __neg__( self : Dict ) -> Matrix:
"""simple docstring"""
return self * -1
def __add__( self : List[Any] , __magic_name__ : Matrix ) -> Matrix:
"""simple docstring"""
if self.order != other.order:
raise ValueError("""Addition requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self : Optional[Any] , __magic_name__ : Matrix ) -> Matrix:
"""simple docstring"""
if self.order != other.order:
raise ValueError("""Subtraction requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self : Union[str, Any] , __magic_name__ : Matrix | int | float ) -> Matrix:
"""simple docstring"""
if isinstance(__magic_name__ , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(__magic_name__ , __magic_name__ ):
if self.num_columns != other.num_rows:
raise ValueError(
"""The number of columns in the first matrix must """
"""be equal to the number of rows in the second""" )
return Matrix(
[
[Matrix.dot_product(__magic_name__ , __magic_name__ ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"""A Matrix can only be multiplied by an int, float, or another matrix""" )
def __pow__( self : Tuple , __magic_name__ : int ) -> Matrix:
"""simple docstring"""
if not isinstance(__magic_name__ , __magic_name__ ):
raise TypeError("""A Matrix can only be raised to the power of an int""" )
if not self.is_square:
raise ValueError("""Only square matrices can be raised to a power""" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"""Only invertable matrices can be raised to a negative power""" )
__snake_case : int = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def lowercase__ ( cls : Dict , __magic_name__ : list[int] , __magic_name__ : list[int] ) -> int:
"""simple docstring"""
return sum(row[i] * column[i] for i in range(len(__magic_name__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
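    # Quick demonstration (a minimal sketch added for illustration; the exact
    # doctests lived in the original docstrings).
    matrix = Matrix([[1, 2], [3, 4]])
    print(matrix.determinant())  # 1 * 4 - 2 * 3 = -2
    print((matrix ** 2).rows)    # [[7, 10], [15, 22]]
    print(matrix.inverse())      # adjugate * (1 / determinant); note that
    # __mul__ truncates each entry to int, so the inverse is approximate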
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
UpperCamelCase_ = datasets.logging.get_logger(__name__)
UpperCamelCase_ = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
UpperCamelCase_ = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework, the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving state-of-the-art results in that year's competition.
See https://unbabel.github.io/COMET/html/models.html for the list of available models.
'''
UpperCamelCase_ = '''
COMET score.
Args:
    `sources` (list of str): Source sentences.
    `predictions` (list of str): Candidate translations.
    `references` (list of str): Reference translations.
    `gpus` (int): Number of GPUs to run COMET on; defaults to 1 if a CUDA device is available, else 0.
    `progress_bar` (bool): If set to True, shows a progress bar while scoring.
Returns:
    `mean_score`: Mean of the segment-level scores.
    `scores`: List of segment-level scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE( datasets.Metric ):
def __lowerCamelCase ( self : Tuple ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://unbabel.github.io/COMET/html/index.html' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'sources': datasets.Value('string' , id='sequence' ),
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/Unbabel/COMET'] , reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf',
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da'))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'src': sources, 'mt': predictions, 'ref': references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
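        # e.g. with the defaults above: (32 // 16) ** 2 + 1 = 5 tokens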
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Dict = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase__ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[Any] = model_class(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Dict = [*signature.parameters.keys()]
UpperCamelCase__ : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase__ ,UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Union[str, Any] = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ):
continue
UpperCamelCase__ : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
UpperCamelCase__ : Optional[int] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE ( self ) -> int:
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase__ ,UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Optional[int] = False
UpperCamelCase__ : List[str] = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
UpperCamelCase__ : int = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
UpperCamelCase__ : Optional[Any] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Tuple = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
UpperCamelCase__ : Dict = model_class(config=__SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
UpperCamelCase__ : Any = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
UpperCamelCase__ : str = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
UpperCamelCase__ : Dict = DPTModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Any = '''add'''
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : List[Any] = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( ):
UpperCamelCase__ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : str = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
UpperCamelCase__ : Optional[int] = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = prepare_img()
UpperCamelCase__ : int = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = model(**__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = outputs.predicted_depth
# verify the predicted depth
UpperCamelCase__ : List[Any] = torch.Size((1, 3_8_4, 3_8_4) )
self.assertEqual(predicted_depth.shape , __SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
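
# Post-processing sketch (not part of the test; the standard DPT recipe for
# turning `predicted_depth` into a viewable depth map, assuming `image` is the
# PIL image returned by prepare_img()):
#
#   prediction = torch.nn.functional.interpolate(
#       predicted_depth.unsqueeze(1), size=image.size[::-1],
#       mode="bicubic", align_corners=False,
#   )
#   depth = (prediction / prediction.max() * 255).squeeze().byte().cpu().numpy()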
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
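        # e.g. with the defaults above: (30 // 2) ** 2 + 2 = 227 tokens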
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[Any] = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCamelCase__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , tf.keras.layers.Dense ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = model_class(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : str = [*signature.parameters.keys()]
UpperCamelCase__ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ) -> str:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = super()._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Any = TFDeiTModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( ):
UpperCamelCase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
UpperCamelCase__ : str = self.default_image_processor
UpperCamelCase__ : List[str] = prepare_img()
UpperCamelCase__ : int = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''tf''' )
# forward pass
UpperCamelCase__ : List[Any] = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
UpperCamelCase__ : Union[str, Any] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
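
# Note on the teacher head (illustration only, not part of the test): at
# inference, TFDeiTForImageClassificationWithTeacher averages its two heads,
# logits = (cls_logits + distillation_logits) / 2, which is what the check
# above compares against the reference values.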
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = dict(self.forward_default_kwargs )
UpperCamelCase__ : Dict = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = self.dummy_sample
UpperCamelCase__ : Optional[int] = 0.1 * sample
UpperCamelCase__ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ : Union[str, Any] = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
UpperCamelCase__ : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
UpperCamelCase__ : int = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCamelCase__ ,UpperCamelCase__ : Any = sample, sample
for t in range(__SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
UpperCamelCase__ : Union[str, Any] = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
UpperCamelCase__ : int = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __SCREAMING_SNAKE_CASE ( self ) -> int:
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = dict(self.forward_default_kwargs )
UpperCamelCase__ : Tuple = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = self.dummy_sample
UpperCamelCase__ : List[Any] = 0.1 * sample
UpperCamelCase__ : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ : Optional[int] = self.get_scheduler_config()
UpperCamelCase__ : Union[str, Any] = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
UpperCamelCase__ : List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
UpperCamelCase__ : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCamelCase__ : Optional[Any] = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
UpperCamelCase__ : Dict = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
if scheduler is None:
UpperCamelCase__ : List[str] = self.scheduler_classes[0]
UpperCamelCase__ : str = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = scheduler_class(**__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = self.scheduler_classes[0]
UpperCamelCase__ : Tuple = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = scheduler_class(**__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = 1_0
UpperCamelCase__ : Tuple = self.dummy_model()
UpperCamelCase__ : Dict = self.dummy_sample_deter
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase__ : List[Any] = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
return sample
def __SCREAMING_SNAKE_CASE ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = dict(self.forward_default_kwargs )
UpperCamelCase__ : List[Any] = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ : Optional[int] = self.get_scheduler_config()
UpperCamelCase__ : Any = scheduler_class(**__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = self.dummy_sample
UpperCamelCase__ : int = 0.1 * sample
if num_inference_steps is not None and hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
UpperCamelCase__ : str = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCamelCase__ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
UpperCamelCase__ : Dict = dummy_past_residuals[: scheduler.config.solver_order]
UpperCamelCase__ : int = scheduler.timesteps[5]
UpperCamelCase__ : Optional[int] = scheduler.timesteps[6]
UpperCamelCase__ : Any = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
UpperCamelCase__ : Any = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : List[str] = DEISMultistepScheduler(**self.get_scheduler_config() )
UpperCamelCase__ : Tuple = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.23916 ) < 1e-3
UpperCamelCase__ : List[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
UpperCamelCase__ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
UpperCamelCase__ : int = UniPCMultistepScheduler.from_config(scheduler.config )
UpperCamelCase__ : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
UpperCamelCase__ : Dict = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.23916 ) < 1e-3
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
"""simple docstring"""
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , algorithm_type='''deis''' , solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
"""simple docstring"""
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , )
UpperCamelCase__ : Any = self.full_loop(
solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , )
assert not torch.isnan(__SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"
def __SCREAMING_SNAKE_CASE ( self ) -> int:
"""simple docstring"""
self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE )
self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=__SCREAMING_SNAKE_CASE , time_step=0 )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ : str = self.full_loop()
UpperCamelCase__ : Optional[int] = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.23916 ) < 1e-3
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : Any = self.full_loop(prediction_type='''v_prediction''' )
UpperCamelCase__ : List[Any] = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.091 ) < 1e-3
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = self.scheduler_classes[0]
UpperCamelCase__ : Optional[Any] = self.get_scheduler_config(thresholding=__SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
UpperCamelCase__ : Tuple = scheduler_class(**__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = 1_0
UpperCamelCase__ : Dict = self.dummy_model()
UpperCamelCase__ : Tuple = self.dummy_sample_deter.half()
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase__ : Any = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
        assert sample.dtype == torch.float16
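
# Usage sketch (not part of the tests; the standard diffusers pattern for
# swapping a pipeline's scheduler for DEIS):
#
#   from diffusers import DiffusionPipeline, DEISMultistepScheduler
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = DEISMultistepScheduler.from_config(pipe.scheduler.config)
#   image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]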
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = BioGptModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ : Any = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Dict = BioGptForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : str = BioGptModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
# create attention mask
UpperCamelCase__ : Optional[int] = torch.ones(input_ids.shape , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = self.seq_length // 2
UpperCamelCase__ : List[str] = 0
# first forward pass
UpperCamelCase__ ,UpperCamelCase__ : Optional[int] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCamelCase__ : Any = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
UpperCamelCase__ : Tuple = ids_tensor((1,) , __SCREAMING_SNAKE_CASE ).item() + 1
UpperCamelCase__ : List[str] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
UpperCamelCase__ : int = random_other_next_tokens
# append to next input_ids and attn_mask
UpperCamelCase__ : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ : List[Any] = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )] , dim=1 , )
# get two different outputs
UpperCamelCase__ : str = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )['''last_hidden_state''']
UpperCamelCase__ : str = model(__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )['''last_hidden_state''']
# select random slice
UpperCamelCase__ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ : Tuple = output_from_no_past[:, -1, random_slice_idx].detach()
UpperCamelCase__ : str = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : List[str] = BioGptModel(config=__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE ).eval()
UpperCamelCase__ : Union[str, Any] = torch.ones(input_ids.shape , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
# first forward pass
UpperCamelCase__ : str = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ ,UpperCamelCase__ : List[Any] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase__ : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase__ : Any = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
UpperCamelCase__ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ : Tuple = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
UpperCamelCase__ : str = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )['''last_hidden_state''']
UpperCamelCase__ : int = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE )[
'''last_hidden_state'''
]
# select random slice
UpperCamelCase__ : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = BioGptForCausalLM(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
UpperCamelCase__ : Tuple = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def create_and_check_biogpt_weight_initialization( self , config , *args ) -> Tuple:
"""simple docstring"""
model = BioGptModel(config )
model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
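# NOTE (added): the model_std check above encodes GPT-2-style residual scaling -- the
# output projection ("c_proj") weights are initialized with
# std = initializer_range / sqrt(2 * num_hidden_layers) so that the variance of the
# residual stream stays roughly constant as depth grows.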
def create_and_check_biogpt_for_token_classification( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ) -> Dict:
"""simple docstring"""
config.num_labels = self.num_labels
model = BioGptForTokenClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def prepare_config_and_inputs_for_common( self ) -> Optional[Any]:
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , input_mask , *_ = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class BioGptModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
pipeline_model_mapping = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
test_pruning = False
def __SCREAMING_SNAKE_CASE ( self ) -> str:
"""simple docstring"""
self.model_tester = BioGptModelTester(self )
self.config_tester = ConfigTester(self , config_class=BioGptConfig , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs , gradient_checkpointing=True )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
"""simple docstring"""
model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(torch_device )
tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
tokenizer.padding_side = '''left'''
# Define PAD Token = EOS Token = 50256
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = model.config.eos_token_id
# use different length sentences to test batching
sentences = [
'''Hello, my dog is a little''',
'''Today, I''',
]
inputs = tokenizer(sentences , return_tensors='''pt''' , padding=True )
input_ids = inputs['''input_ids'''].to(torch_device )
outputs = model.generate(
input_ids=input_ids , attention_mask=inputs['''attention_mask'''].to(torch_device ) , )
inputs_non_padded = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(torch_device )
output_non_padded = model.generate(input_ids=inputs_non_padded )
num_paddings = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
inputs_padded = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(torch_device )
output_padded = model.generate(input_ids=inputs_padded , max_length=model.config.max_length - num_paddings )
batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
expected_output_sentence = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(expected_output_sentence , batch_out_sentence )
self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = BioGptModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
input_ids = input_dict['''input_ids''']
attention_mask = input_ids.ne(1 ).to(torch_device )
sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
model = BioGptForSequenceClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.problem_type = '''multi_label_classification'''
input_ids = input_dict['''input_ids''']
attention_mask = input_ids.ne(1 ).to(torch_device )
sequence_labels = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
model = BioGptForSequenceClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class BioGptModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
"""simple docstring"""
model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
input_ids = torch.tensor([[2, 4_805, 9, 656, 21]] )
output = model(input_ids )[0]
vocab_size = 42_384
expected_shape = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , expected_shape )
expected_slice = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
"""simple docstring"""
tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(torch_device )
torch.manual_seed(0 )
tokenized = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(torch_device )
output_ids = model.generate(
**tokenized , min_length=100 , max_length=1_024 , num_beams=5 , early_stopping=True , )
output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True )
EXPECTED_OUTPUT_STR = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(output_str , EXPECTED_OUTPUT_STR )
| 285
| 1
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput( BaseOutput ):
sample: torch.FloatTensor
class Encoder( nn.Module ):
def __init__( self , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D",) , block_out_channels=(64,) , layers_per_block=2 , norm_num_groups=32 , act_fn="silu" , double_z=True , ) -> None:
"""simple docstring"""
super().__init__()
self.layers_per_block = layers_per_block
self.conv_in = torch.nn.Conv2d(
in_channels , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
self.mid_block = None
self.down_blocks = nn.ModuleList([] )
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types ):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels ) - 1
down_block = get_down_block(
down_block_type , num_layers=self.layers_per_block , in_channels=input_channel , out_channels=output_channel , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=act_fn , resnet_groups=norm_num_groups , attention_head_dim=output_channel , temb_channels=None , )
self.down_blocks.append(down_block )
# mid
self.mid_block = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=act_fn , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=norm_num_groups , temb_channels=None , )
# out
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=norm_num_groups , eps=1E-6 )
self.conv_act = nn.SiLU()
conv_out_channels = 2 * out_channels if double_z else out_channels
self.conv_out = nn.Conv2d(block_out_channels[-1] , conv_out_channels , 3 , padding=1 )
self.gradient_checkpointing = False
def forward( self , x: torch.FloatTensor ) -> torch.FloatTensor:
"""simple docstring"""
sample = x
sample = self.conv_in(sample )
if self.training and self.gradient_checkpointing:
def create_custom_forward(module ):
def custom_forward(*inputs ):
return module(*inputs )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
sample = torch.utils.checkpoint.checkpoint(
create_custom_forward(down_block ) , sample , use_reentrant=False )
# middle
sample = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , sample , use_reentrant=False )
else:
for down_block in self.down_blocks:
sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block ) , sample )
# middle
sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , sample )
else:
# down
for down_block in self.down_blocks:
sample = down_block(sample )
# middle
sample = self.mid_block(sample )
# post-process
sample = self.conv_norm_out(sample )
sample = self.conv_act(sample )
sample = self.conv_out(sample )
return sample
class Decoder( nn.Module ):
def __init__( self , in_channels=3 , out_channels=3 , up_block_types=("UpDecoderBlock2D",) , block_out_channels=(64,) , layers_per_block=2 , norm_num_groups=32 , act_fn="silu" , norm_type="group" , ) -> None:
"""simple docstring"""
super().__init__()
self.layers_per_block = layers_per_block
self.conv_in = nn.Conv2d(
in_channels , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
self.mid_block = None
self.up_blocks = nn.ModuleList([] )
temb_channels = in_channels if norm_type == """spatial""" else None
# mid
self.mid_block = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=act_fn , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=norm_num_groups , temb_channels=temb_channels , )
# up
reversed_block_out_channels = list(reversed(block_out_channels ) )
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types ):
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
is_final_block = i == len(block_out_channels ) - 1
up_block = get_up_block(
up_block_type , num_layers=self.layers_per_block + 1 , in_channels=prev_output_channel , out_channels=output_channel , prev_output_channel=None , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=act_fn , resnet_groups=norm_num_groups , attention_head_dim=output_channel , temb_channels=temb_channels , resnet_time_scale_shift=norm_type , )
self.up_blocks.append(up_block )
prev_output_channel = output_channel
# out
if norm_type == "spatial":
self.conv_norm_out = SpatialNorm(block_out_channels[0] , temb_channels )
else:
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=norm_num_groups , eps=1E-6 )
self.conv_act = nn.SiLU()
self.conv_out = nn.Conv2d(block_out_channels[0] , out_channels , 3 , padding=1 )
self.gradient_checkpointing = False
def forward( self , z , latent_embeds=None ):
"""simple docstring"""
sample = z
sample = self.conv_in(sample )
upscale_dtype = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(module ):
def custom_forward(*inputs ):
return module(*inputs )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
sample = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , sample , latent_embeds , use_reentrant=False )
sample = sample.to(upscale_dtype )
# up
for up_block in self.up_blocks:
sample = torch.utils.checkpoint.checkpoint(
create_custom_forward(up_block ) , sample , latent_embeds , use_reentrant=False )
else:
# middle
sample = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , sample , latent_embeds )
sample = sample.to(upscale_dtype )
# up
for up_block in self.up_blocks:
sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block ) , sample , latent_embeds )
else:
# middle
sample = self.mid_block(sample , latent_embeds )
sample = sample.to(upscale_dtype )
# up
for up_block in self.up_blocks:
sample = up_block(sample , latent_embeds )
# post-process
if latent_embeds is None:
sample = self.conv_norm_out(sample )
else:
sample = self.conv_norm_out(sample , latent_embeds )
sample = self.conv_act(sample )
sample = self.conv_out(sample )
return sample
class VectorQuantizer( nn.Module ):
def __init__( self , n_e , vq_embed_dim , beta , remap=None , unknown_index="random" , sane_index_shape=False , legacy=True ) -> None:
"""simple docstring"""
super().__init__()
self.n_e = n_e
self.vq_embed_dim = vq_embed_dim
self.beta = beta
self.legacy = legacy
self.embedding = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
self.remap = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
self.re_embed = self.used.shape[0]
self.unknown_index = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
self.unknown_index = self.re_embed
self.re_embed = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
self.re_embed = n_e
self.sane_index_shape = sane_index_shape
def remap_to_used( self , inds ):
"""simple docstring"""
ishape = inds.shape
assert len(ishape ) > 1
inds = inds.reshape(ishape[0] , -1 )
used = self.used.to(inds )
match = (inds[:, :, None] == used[None, None, ...]).long()
new = match.argmax(-1 )
unknown = match.sum(2 ) < 1
if self.unknown_index == "random":
new[unknown] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
new[unknown] = self.unknown_index
return new.reshape(ishape )
def unmap_to_all( self , inds ):
"""simple docstring"""
ishape = inds.shape
assert len(ishape ) > 1
inds = inds.reshape(ishape[0] , -1 )
used = self.used.to(inds )
if self.re_embed > self.used.shape[0]: # extra token
inds[inds >= self.used.shape[0]] = 0 # simply set to zero
back = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , inds )
return back.reshape(ishape )
def forward( self , z: torch.FloatTensor ):
"""simple docstring"""
z = z.permute(0 , 2 , 3 , 1 ).contiguous()
z_flattened = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
min_encoding_indices = torch.argmin(torch.cdist(z_flattened , self.embedding.weight ) , dim=1 )
z_q = self.embedding(min_encoding_indices ).view(z.shape )
perplexity = None
min_encodings = None
# compute loss for embedding
if not self.legacy:
loss = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
loss = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
z_q = z + (z_q - z).detach()
# reshape back to match original input shape
z_q = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
min_encoding_indices = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
min_encoding_indices = self.remap_to_used(min_encoding_indices )
min_encoding_indices = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def get_codebook_entry( self , indices , shape ):
"""simple docstring"""
if self.remap is not None:
indices = indices.reshape(shape[0] , -1 ) # add batch axis
indices = self.unmap_to_all(indices )
indices = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
z_q = self.embedding(indices )
if shape is not None:
z_q = z_q.view(shape )
# reshape back to match original input shape
z_q = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
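# --- usage sketch (added; not part of the original file) -------------------------
# Minimal illustration of the VectorQuantizer above; the shapes and hyperparameters
# are assumptions for the example. Note the straight-through estimator in forward():
# z_q = z + (z_q - z).detach() makes the forward pass use the quantized codes while
# gradients flow back to the encoder output z unchanged.
#
#   vq = VectorQuantizer(n_e=512 , vq_embed_dim=4 , beta=0.25 )
#   z = torch.randn(1 , 4 , 8 , 8 )        # encoder output: (batch, channel, h, w)
#   z_q , commit_loss , _ = vq(z )         # z_q has the same shape as z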
class DiagonalGaussianDistribution( object ):
def __init__( self , parameters: torch.Tensor , deterministic: bool = False ) -> None:
"""simple docstring"""
self.parameters = parameters
self.mean , self.logvar = torch.chunk(parameters , 2 , dim=1 )
self.logvar = torch.clamp(self.logvar , -30.0 , 20.0 )
self.deterministic = deterministic
self.std = torch.exp(0.5 * self.logvar )
self.var = torch.exp(self.logvar )
if self.deterministic:
self.var = self.std = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def sample( self , generator: Optional[torch.Generator] = None ) -> torch.FloatTensor:
"""simple docstring"""
sample = randn_tensor(
self.mean.shape , generator=generator , device=self.parameters.device , dtype=self.parameters.dtype )
x = self.mean + self.std * sample
return x
def kl( self , other=None ) -> torch.Tensor:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
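# closed-form KL(N(mean, var) || N(0, I)) for a diagonal Gaussian:
# 0.5 * sum(mean^2 + var - 1 - logvar) over the non-batch dimensions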
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def nll( self , sample: torch.Tensor , dims=[1, 2, 3] ) -> torch.Tensor:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
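# per-dimension Gaussian negative log-likelihood:
# 0.5 * (log(2*pi) + logvar + (sample - mean)^2 / var), summed over `dims`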
logtwopi = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=dims )
def mode( self ) -> torch.Tensor:
"""simple docstring"""
return self.mean
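# --- usage sketch (added; not part of the original file) -------------------------
# How DiagonalGaussianDistribution is typically used in a VAE encode step; the
# `moments` tensor and its shape are assumptions for the example (mean and logvar
# stacked along the channel axis).
#
#   moments = torch.randn(1 , 8 , 32 , 32 )
#   posterior = DiagonalGaussianDistribution(moments )
#   z = posterior.sample()                 # reparameterized: mean + std * eps
#   loss_kl = posterior.kl().mean()        # KL regularizer in the VAE loss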
| 59
|
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image( image: Union[List, PIL.Image.Image, torch.Tensor] ):
warnings.warn(
"""The preprocess method is deprecated and will be removed in a future version. Please"""
""" use VaeImageProcessor.preprocess instead""" , FutureWarning , )
if isinstance(image , torch.Tensor ):
return image
elif isinstance(image , PIL.Image.Image ):
image = [image]
if isinstance(image[0] , PIL.Image.Image ):
w , h = image[0].size
w , h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
image = np.concatenate(image , axis=0 )
image = np.array(image ).astype(np.float32 ) / 255.0
image = image.transpose(0 , 3 , 1 , 2 )
image = 2.0 * image - 1.0
image = torch.from_numpy(image )
elif isinstance(image[0] , torch.Tensor ):
image = torch.cat(image , dim=0 )
return image
def _preprocess_mask( mask: Union[List, PIL.Image.Image, torch.Tensor] ):
if isinstance(mask , torch.Tensor ):
return mask
elif isinstance(mask , PIL.Image.Image ):
mask = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
w , h = mask[0].size
w , h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
mask = [np.array(m.convert("""L""" ).resize((w, h) , resample=PIL_INTERPOLATION["""nearest"""] ) )[None, :] for m in mask]
mask = np.concatenate(mask , axis=0 )
mask = mask.astype(np.float32 ) / 255.0
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
mask = torch.from_numpy(mask )
elif isinstance(mask[0] , torch.Tensor ):
mask = torch.cat(mask , dim=0 )
return mask
class RePaintPipeline( DiffusionPipeline ):
unet: UNet2DModel
scheduler: RePaintScheduler
def __init__( self , unet: UNet2DModel , scheduler: RePaintScheduler ) -> None:
"""simple docstring"""
super().__init__()
self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
def __call__( self , image: Union[torch.Tensor, PIL.Image.Image] , mask_image: Union[torch.Tensor, PIL.Image.Image] , num_inference_steps: int = 250 , eta: float = 0.0 , jump_length: int = 10 , jump_n_sample: int = 10 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
original_image = image
original_image = _preprocess_image(original_image )
original_image = original_image.to(device=self.device , dtype=self.unet.dtype )
mask_image = _preprocess_mask(mask_image )
mask_image = mask_image.to(device=self.device , dtype=self.unet.dtype )
batch_size = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(generator , list ) and len(generator ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
image_shape = original_image.shape
image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(num_inference_steps , jump_length , jump_n_sample , self.device )
self.scheduler.eta = eta
t_last = self.scheduler.timesteps[0] + 1
generator = generator[0] if isinstance(generator , list ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
model_output = self.unet(image , t ).sample
# compute previous image: x_t -> x_t-1
image = self.scheduler.step(model_output , t , image , original_image , mask_image , generator ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
image = self.scheduler.undo_step(image , t_last , generator )
t_last = t
image = (image / 2 + 0.5).clamp(0 , 1 )
image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image )
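# --- usage sketch (added; not part of the original file) -------------------------
# Minimal RePaint inpainting call; the checkpoint id and image handling are
# assumptions for the example.
#
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256" ).to("cuda" )
#   result = pipe(image=init_image , mask_image=mask , num_inference_steps=250 )
#   result.images[0].save("inpainted.png" )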
| 59
| 1
|
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ) -> int:
'''simple docstring'''
model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
params = sum([np.prod(p.size() ) for p in model_parameters] )
return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir , metric ) -> ModelCheckpoint:
'''simple docstring'''
if metric == "rouge2":
exp = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
exp = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
exp = "{val_avg_em:.4f}-{step_count}"
else:
raise NotImplementedError(
f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
" function." )
checkpoint_callback = ModelCheckpoint(
dirpath=output_dir , filename=exp , monitor=f"""val_{metric}""" , mode="max" , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def get_early_stopping_callback( metric , patience ) -> EarlyStopping:
'''simple docstring'''
return EarlyStopping(
monitor=f"""val_{metric}""" , mode="min" if "loss" in metric else "max" , patience=patience , verbose=True , )
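# --- usage sketch (added; not part of the original file) -------------------------
# How these helpers are typically wired into a pl.Trainer; the `args` fields are
# assumptions for the example.
#
#   checkpoint = get_checkpoint_callback(args.output_dir , metric="rouge2" )
#   early_stop = get_early_stopping_callback(metric="rouge2" , patience=3 )
#   trainer = pl.Trainer(callbacks=[checkpoint , early_stop , Seq2SeqLoggingCallback()] )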
class Seq2SeqLoggingCallback( pl.Callback ):
def _lowercase (self : Optional[Any] , trainer : Optional[int] , pl_module : Any ):
lrs = {f"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lrs )
@rank_zero_only
def _write_logs (self : str , trainer : pl.Trainer , pl_module : pl.LightningModule , type_path : str , save_generations : bool = True ):
logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
metrics = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
od = Path(pl_module.hparams.output_dir )
if type_path == "test":
results_file = od / "test_results.txt"
generations_file = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
results_file = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
generations_file = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=True )
generations_file.parent.mkdir(exist_ok=True )
with open(results_file , "a+" ) as writer:
for key in sorted(metrics ):
if key in ["log", "progress_bar", "preds"]:
continue
val = metrics[key]
if isinstance(val , torch.Tensor ):
val = val.item()
msg = f"""{key}: {val:.6f}\n"""
writer.write(msg )
if not save_generations:
return
if "preds" in metrics:
content = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(content )
@rank_zero_only
def _lowercase (self : int , trainer : pl.Trainer , pl_module : pl.LightningModule ):
try:
npars = pl_module.model.model.num_parameters()
except AttributeError:
npars = pl_module.model.num_parameters()
n_trainable_pars = count_trainable_parameters(pl_module )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} )
@rank_zero_only
def _lowercase (self : Optional[Any] , trainer : pl.Trainer , pl_module : pl.LightningModule ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(trainer , pl_module , "test" )
@rank_zero_only
def _lowercase (self : List[Any] , trainer : pl.Trainer , pl_module : pl.LightningModule ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 78
|
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
pipeline_class = VideoToVideoSDPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'video'} ) - {'image', 'width', 'height'}
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'video'} ) - {'image'}
required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
test_attention_slicing = False
# No `output_type`.
required_optional_params = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def get_dummy_components( self ):
torch.manual_seed(0 )
unet = UNet3DConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
scheduler = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
components = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def get_dummy_inputs( self , device , seed=0 ):
# 3 frames
video = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
if str(device ).startswith('mps' ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def __UpperCamelCase ( self : Optional[int] ) -> str:
device = 'cpu' # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = VideoToVideoSDPipeline(**components )
sd_pipe = sd_pipe.to(device )
sd_pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
inputs['output_type'] = 'np'
frames = sd_pipe(**inputs ).frames
image_slice = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCamelCase ( self : int ) -> Tuple:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False , expected_max_diff=5e-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __UpperCamelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __UpperCamelCase ( self : str ) -> Any:
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
pass
def __UpperCamelCase ( self : Any ) -> Any:
return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
pipe = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.float16 )
pipe.enable_model_cpu_offload()
# 10 frames
generator = torch.Generator(device='cpu' ).manual_seed(0 )
video = torch.randn((1, 10, 3, 1_024, 576) , generator=generator )
video = video.to('cuda' )
prompt = 'Spiderman is surfing'
video_frames = pipe(prompt , video=video , generator=generator , num_inference_steps=3 , output_type='pt' ).frames
expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 106
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig( PretrainedConfig ):
"""simple docstring"""
model_type = "blenderbot-small"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , vocab_size=50_265 , max_position_embeddings=512 , encoder_layers=8 , encoder_ffn_dim=2_048 , encoder_attention_heads=16 , decoder_layers=8 , decoder_ffn_dim=2_048 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=1 , scale_embedding=False , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , forced_eos_token_id=2 , **kwargs , ):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
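# --- usage sketch (added; not part of the original file) -------------------------
# Constructing the config with its defaults; `hidden_size` resolves to `d_model`
# through the attribute_map declared above.
#
#   config = BlenderbotSmallConfig()
#   assert config.hidden_size == config.d_model == 512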
class BlenderbotSmallOnnxConfig( OnnxSeq2SeqConfigWithPast ):
"""simple docstring"""
@property
def inputs( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
common_inputs = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
common_inputs["""decoder_input_ids"""] = {0: """batch"""}
common_inputs["""decoder_attention_mask"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
common_inputs["""decoder_input_ids"""] = {0: """batch""", 1: """decoder_sequence"""}
common_inputs["""decoder_attention_mask"""] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
common_inputs = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
num_encoder_layers , _ = self.num_layers
for i in range(num_encoder_layers ):
common_inputs[f"""past_key_values.{i}.key"""] = {0: """batch""", 2: """past_sequence + sequence"""}
common_inputs[f"""past_key_values.{i}.value"""] = {0: """batch""", 2: """past_sequence + sequence"""}
else:
common_inputs = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def outputs( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
common_outputs = super().outputs
else:
common_outputs = super(OnnxConfigWithPast , self ).outputs
if self.use_past:
num_encoder_layers , _ = self.num_layers
for i in range(num_encoder_layers ):
common_outputs[f"""present.{i}.key"""] = {0: """batch""", 2: """past_sequence + sequence"""}
common_outputs[f"""present.{i}.value"""] = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def _generate_dummy_inputs_for_default_and_seq2seq_lm( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
tokenizer , batch_size , seq_length , is_pair , framework )
# Generate decoder inputs
decoder_seq_length = seq_length if not self.use_past else 1
decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
tokenizer , batch_size , decoder_seq_length , is_pair , framework )
decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
common_inputs = dict(**encoder_inputs , **decoder_inputs )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
batch , encoder_seq_length = common_inputs["""input_ids"""].shape
decoder_seq_length = common_inputs["""decoder_input_ids"""].shape[1]
num_encoder_attention_heads , num_decoder_attention_heads = self.num_attention_heads
encoder_shape = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
decoder_past_length = decoder_seq_length + 3
decoder_shape = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
common_inputs["""decoder_attention_mask"""] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(batch , decoder_past_length )] , dim=1 )
common_inputs["""past_key_values"""] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
num_encoder_layers , num_decoder_layers = self.num_layers
min_num_layers = min(num_encoder_layers , num_decoder_layers )
max_num_layers = max(num_encoder_layers , num_decoder_layers ) - min_num_layers
remaining_side_name = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(min_num_layers ):
common_inputs["past_key_values"].append(
(
torch.zeros(decoder_shape ),
torch.zeros(decoder_shape ),
torch.zeros(encoder_shape ),
torch.zeros(encoder_shape ),
) )
# TODO: test this.
shape = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(min_num_layers , max_num_layers ):
common_inputs["past_key_values"].append((torch.zeros(shape ), torch.zeros(shape )) )
return common_inputs
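# NOTE (added): each past_key_values entry above follows the transformers K/V cache
# layout (batch, num_attention_heads, sequence_length, head_dim); in the seq2seq case
# a layer stores decoder self-attention K/V first, then encoder cross-attention K/V.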
def _generate_dummy_inputs_for_causal_lm( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
tokenizer , batch_size , seq_length , is_pair , framework )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
batch , seqlen = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
past_key_values_length = seqlen + 2
num_encoder_layers , _ = self.num_layers
num_encoder_attention_heads , _ = self.num_attention_heads
past_shape = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
mask_dtype = common_inputs["""attention_mask"""].dtype
common_inputs["""attention_mask"""] = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
common_inputs["""past_key_values"""] = [
(torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(num_encoder_layers )
]
return common_inputs
def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
batch_size = compute_effective_axis_dimension(
batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
token_to_add = tokenizer.num_special_tokens_to_add(is_pair )
seq_length = compute_effective_axis_dimension(
seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
# Generate dummy inputs according to compute batch and sequence
dummy_input = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
common_inputs = dict(tokenizer(dummy_input , return_tensors=framework ) )
return common_inputs
def generate_dummy_inputs( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
elif self.task == "causal-lm":
common_inputs = self._generate_dummy_inputs_for_causal_lm(
tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
else:
common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
return common_inputs
def _flatten_past_key_values_( self , flattened_output , name , idx , t ):
if self.task in ["default", "seq2seq-lm"]:
flattened_output = super()._flatten_past_key_values_(flattened_output , name , idx , t )
else:
flattened_output = super(OnnxSeq2SeqConfigWithPast , self )._flatten_past_key_values_(
flattened_output , name , idx , t )
return flattened_output
| 301
|
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest( SchedulerCommonTest ):
"""simple docstring"""
scheduler_classes = (UnCLIPScheduler,)
def get_scheduler_config( self , **kwargs ):
config = {
"""num_train_timesteps""": 1_000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**kwargs )
return config
def lowerCamelCase ( self : Any ):
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=timesteps )
def lowerCamelCase ( self : Dict ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=variance )
def lowerCamelCase ( self : int ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=clip_sample )
def lowerCamelCase ( self : Union[str, Any] ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=clip_sample_range )
def lowerCamelCase ( self : Tuple ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=prediction_type )
def lowerCamelCase ( self : Union[str, Any] ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=time_step , prev_timestep=prev_timestep )
def lowerCamelCase ( self : Any ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(variance_type="""fixed_small_log""" )
scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_0_0_0E-1_0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1E-5
def lowerCamelCase ( self : Dict ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(variance_type="""learned_range""" )
scheduler = scheduler_class(**scheduler_config )
predicted_variance = 0.5
assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.1712790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=predicted_variance ) - -5.7998052 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=predicted_variance ) - -0.0010011 < 1E-5
def lowerCamelCase ( self : Optional[int] ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = scheduler.timesteps
model = self.dummy_model()
sample = self.dummy_sample_deter
generator = torch.manual_seed(0 )
for i, t in enumerate(timesteps ):
# 1. predict noise residual
residual = model(sample , t )
# 2. predict previous mean of sample x_t-1
pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 252.2682495 ) < 1E-2
assert abs(result_mean.item() - 0.3284743 ) < 1E-3
def lowerCamelCase ( self : List[str] ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(25 )
timesteps = scheduler.timesteps
model = self.dummy_model()
sample = self.dummy_sample_deter
generator = torch.manual_seed(0 )
for i, t in enumerate(timesteps ):
# 1. predict noise residual
residual = model(sample , t )
if i + 1 == timesteps.shape[0]:
prev_timestep = None
else:
prev_timestep = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
pred_prev_sample = scheduler.step(
residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 258.2044983 ) < 1E-2
assert abs(result_mean.item() - 0.3362038 ) < 1E-3
def lowerCamelCase ( self : Any ):
pass
def lowerCamelCase ( self : Union[str, Any] ):
pass
| 301
| 1
|
def temp_input_value( min_val: int = 10 , max_val: int = 1000 , option: bool = True ) -> int:
"""simple docstring"""
assert (
isinstance(min_val , int )
and isinstance(max_val , int )
and isinstance(option , bool )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" )
return min_val if option else max_val
def get_avg( number_1: int , number_2: int ) -> int:
"""simple docstring"""
return int((number_1 + number_2) / 2 )
def guess_the_number( lower: int , higher: int , to_guess: int ) -> None:
"""simple docstring"""
assert (
isinstance(lower , int ) and isinstance(higher , int ) and isinstance(to_guess , int )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)" )
if not lower < to_guess < higher:
raise ValueError(
"guess value must be within the range of lower and higher value" )
def answer(number: int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started..." )
last_lowest = lower
last_highest = higher
last_numbers = []
while True:
number = get_avg(last_lowest , last_highest )
last_numbers.append(number )
if answer(number ) == "low":
last_lowest = number
elif answer(number ) == "high":
last_highest = number
else:
break
print(F"""guess the number : {last_numbers[-1]}""" )
print(F"""details : {last_numbers!s}""" )
def main() -> None:
"""simple docstring"""
lower = int(input("Enter lower value : " ).strip() )
higher = int(input("Enter high value : " ).strip() )
guess = int(input("Enter value to guess : " ).strip() )
guess_the_number(lower , higher , guess )
if __name__ == "__main__":
main()
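# --- worked example (added; not part of the original file) -----------------------
# guess_the_number performs a binary search over [lower, higher]:
#   guess_the_number(0 , 1_000 , 333 ) probes 500 (high), 250 (low), 375 (high),
#   312 (low), ... halving the interval each round, so it needs at most about
#   log2(1_000) ~ 10 probes before get_avg converges on 333.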
| 59
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_vivit'] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
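
# Note on the pattern above (a sketch, not part of the original module): _LazyModule
# implements PEP 562-style lazy imports, so importing the package only builds the name
# table, while the first attribute access triggers the heavy torch-backed import, e.g.:
#
#   from transformers import VivitConfig                  # cheap: config only
#   from transformers import VivitForVideoClassification  # imports modeling_vivit lazily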
| 648
| 0
|
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    """Configuration class to store the configuration of a DETR object-detection model."""

    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100,
        encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8,
        decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True,
        activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0,
        activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False,
        position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True,
        dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1,
        dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2,
        eos_coefficient=0.1, **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        """Instantiate a DetrConfig from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
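
# Hedged usage sketch (assumes transformers is installed; values are illustrative):
#
#   config = DetrConfig(num_queries=50, d_model=128)
#   assert config.hidden_size == 128   # resolved through attribute_map / the property
#   config_dict = config.to_dict()     # backbone_config is serialized recursively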
| 721
|
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model) -> None:
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
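
# Example invocation (script name assumed from the usual transformers naming convention,
# and all paths are placeholders):
#
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin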
| 455
| 0
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
@lru_cache()
def bytes_to_unicode() -> dict:
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, so the BPE
    vocabulary can avoid whitespace/control characters.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word: tuple) -> set:
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
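
# Quick sanity notes for the two helpers above (illustrative, not in the original file):
#   len(bytes_to_unicode()) == 256                          # every byte gets a printable stand-in
#   get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}  # the pairs BPE merge ranking inspects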
class BartTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for BART (derived from the GPT-2 tokenizer)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>",
        sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
        mask_token="<mask>", add_prefix_space=False, **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token,
            add_prefix_space=add_prefix_space, **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text: str) -> list:
        """Tokenize a string into BPE tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens: list) -> str:
        """Converts a sequence of tokens back into a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: str = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: list, token_ids_1: list = None) -> list:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: list, token_ids_1: list = None, already_has_special_tokens: bool = False) -> list:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: list, token_ids_1: list = None) -> list:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
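
# Hedged usage sketch (requires the downloaded vocab/merges files listed above):
#
#   tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
#   enc = tokenizer("Hello world")
#   # enc["input_ids"] starts with the bos token <s> (id 0) and ends with </s> (id 2)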
| 369
|
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
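
# A few examples derived from the regex above (prefix 0/94/+94/0094, then 7x, then 7 digits):
#   is_sri_lankan_phone_number("+94773283048")  -> True
#   is_sri_lankan_phone_number("0718382399")    -> True
#   is_sri_lankan_phone_number("0094112343221") -> False  (the digit after the prefix must be 7)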
| 202
| 0
|
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    """True if naively cancelling the shared digit leaves the fraction's value unchanged."""
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    """Collect the digit-cancelling fractions with numerators up to ``digit_len`` digits."""
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f'{num}/{den}')
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    """Return the denominator of the product of the fractions, in lowest terms."""
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
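
# Sanity check (Project Euler 33): the four non-trivial digit-cancelling fractions are
# 16/64, 19/95, 26/65 and 49/98; their product is 1/100 in lowest terms, so
# solution() should print 100.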
| 11
|
'''simple docstring'''
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """Find a root of ``equation`` inside [a, b] to within 0.01, assuming a sign change."""
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
| 1
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : bool , _UpperCAmelCase : str = None , _UpperCAmelCase : list = None ) -> List[Any]:
"""simple docstring"""
lowercase__ = None
lowercase__ = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) )
lowercase__ = os.path.abspath("""examples""" )
for item in os.listdir(_UpperCAmelCase ):
if item not in EXCLUDE_EXAMPLES:
lowercase__ = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if os.path.isfile(_UpperCAmelCase ) and ".py" in item_path:
with self.subTest(
tested_script=_UpperCAmelCase , feature_script=_UpperCAmelCase , tested_section="""main()""" if parser_only else """training_function()""" , ):
lowercase__ = compare_against_test(
os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowercase__ = """\n""".join(_UpperCAmelCase )
if special_strings is not None:
for string in special_strings:
lowercase__ = diff.replace(_UpperCAmelCase , """""" )
self.assertEqual(_UpperCAmelCase , """""" )
def lowerCamelCase__ (self : Optional[int] ) -> int:
"""simple docstring"""
self.one_complete_example("""complete_nlp_example.py""" , _UpperCAmelCase )
self.one_complete_example("""complete_nlp_example.py""" , _UpperCAmelCase )
def lowerCamelCase__ (self : Tuple ) -> Dict:
"""simple docstring"""
lowercase__ = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) )
lowercase__ = [
""" """ * 16 + """{\n\n""",
""" """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
""" """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
""" """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
""" """ * 20 + """\"epoch\": epoch,\n\n""",
""" """ * 16 + """},\n\n""",
""" """ * 16 + """step=epoch,\n""",
""" """ * 12,
""" """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
]
self.one_complete_example("""complete_cv_example.py""" , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
self.one_complete_example("""complete_cv_example.py""" , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = False
@classmethod
def lowerCamelCase__ (cls : Tuple ) -> int:
"""simple docstring"""
super().setUpClass()
lowercase__ = tempfile.mkdtemp()
lowercase__ = os.path.join(cls._tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
lowercase__ = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def lowerCamelCase__ (cls : List[Any] ) -> int:
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def lowerCamelCase__ (self : str ) -> Optional[int]:
"""simple docstring"""
lowercase__ = f'''
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) )
def lowerCamelCase__ (self : List[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = f'''
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
'''.split()
lowercase__ = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) )
def lowerCamelCase__ (self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = f'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
'''.split()
lowercase__ = run_command(self._launch_args + testargs , return_stdout=_UpperCAmelCase )
self.assertNotIn("""epoch 0:""" , _UpperCAmelCase )
self.assertIn("""epoch 1:""" , _UpperCAmelCase )
def lowerCamelCase__ (self : List[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = f'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
'''.split()
lowercase__ = run_command(self._launch_args + testargs , return_stdout=_UpperCAmelCase )
if torch.cuda.is_available():
lowercase__ = torch.cuda.device_count()
else:
lowercase__ = 1
if num_processes > 1:
self.assertNotIn("""epoch 0:""" , _UpperCAmelCase )
self.assertIn("""epoch 1:""" , _UpperCAmelCase )
else:
self.assertIn("""epoch 0:""" , _UpperCAmelCase )
self.assertIn("""epoch 1:""" , _UpperCAmelCase )
@slow
def lowerCamelCase__ (self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowercase__ = """
examples/by_feature/cross_validation.py
--num_folds 2
""".split()
with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
lowercase__ = run_command(self._launch_args + testargs , return_stdout=_UpperCAmelCase )
lowercase__ = re.findall("""({.+})""" , _UpperCAmelCase )
lowercase__ = [r for r in results if """accuracy""" in r][-1]
lowercase__ = ast.literal_eval(_UpperCAmelCase )
self.assertGreaterEqual(results["""accuracy"""] , 0.75 )
def lowerCamelCase__ (self : List[Any] ) -> int:
"""simple docstring"""
lowercase__ = ["""examples/by_feature/multi_process_metrics.py"""]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def lowerCamelCase__ (self : List[str] ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
lowercase__ = f'''
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , """tracking""" ) ) )
def lowerCamelCase__ (self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = ["""examples/by_feature/gradient_accumulation.py"""]
run_command(self._launch_args + testargs )
def lowerCamelCase__ (self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = ["""examples/by_feature/local_sgd.py"""]
run_command(self._launch_args + testargs )
| 15
|
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings))

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained Reformer model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
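
# Example invocation (all paths are placeholders):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path /path/to/model.pkl \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin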
| 465
| 0
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class a ( nn.Module ):
"""simple docstring"""
lowerCamelCase :int
lowerCamelCase :int
lowerCamelCase :float = 0.0
lowerCamelCase :int = 1
lowerCamelCase :int = 1
lowerCamelCase :bool = True
lowerCamelCase :bool = False
lowerCamelCase :bool = False
lowerCamelCase :bool = False
lowerCamelCase :jnp.dtype = jnp.floataa
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = []
_A = []
for i in range(self.num_layers ):
_A = self.in_channels if i == 0 else self.out_channels
_A = FlaxResnetBlockaD(
in_channels=lowerCAmelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase_ )
_A = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCAmelCase_ )
_A = resnets
_A = attentions
if self.add_downsample:
_A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True ) -> List[Any]:
_A = ()
for resnet, attn in zip(self.resnets , self.attentions ):
_A = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
_A = attn(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
_A = self.downsamplers_a(lowerCAmelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class a ( nn.Module ):
"""simple docstring"""
lowerCamelCase :int
lowerCamelCase :int
lowerCamelCase :float = 0.0
lowerCamelCase :int = 1
lowerCamelCase :bool = True
lowerCamelCase :jnp.dtype = jnp.floataa
def UpperCAmelCase ( self ) -> Optional[int]:
_A = []
for i in range(self.num_layers ):
_A = self.in_channels if i == 0 else self.out_channels
_A = FlaxResnetBlockaD(
in_channels=lowerCAmelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase_ )
_A = resnets
if self.add_downsample:
_A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True ) -> Union[str, Any]:
_A = ()
for resnet in self.resnets:
_A = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
_A = self.downsamplers_a(lowerCAmelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class a ( nn.Module ):
"""simple docstring"""
lowerCamelCase :int
lowerCamelCase :int
lowerCamelCase :int
lowerCamelCase :float = 0.0
lowerCamelCase :int = 1
lowerCamelCase :int = 1
lowerCamelCase :bool = True
lowerCamelCase :bool = False
lowerCamelCase :bool = False
lowerCamelCase :bool = False
lowerCamelCase :jnp.dtype = jnp.floataa
def UpperCAmelCase ( self ) -> Optional[int]:
_A = []
_A = []
for i in range(self.num_layers ):
_A = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_A = self.prev_output_channel if i == 0 else self.out_channels
_A = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase_ )
_A = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCAmelCase_ )
_A = resnets
_A = attentions
if self.add_upsample:
_A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True ) -> Optional[int]:
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
_A = res_hidden_states_tuple[-1]
_A = res_hidden_states_tuple[:-1]
_A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_A = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
_A = attn(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
if self.add_upsample:
_A = self.upsamplers_a(lowerCAmelCase_ )
return hidden_states
class a ( nn.Module ):
"""simple docstring"""
lowerCamelCase :int
lowerCamelCase :int
lowerCamelCase :int
lowerCamelCase :float = 0.0
lowerCamelCase :int = 1
lowerCamelCase :bool = True
lowerCamelCase :jnp.dtype = jnp.floataa
def UpperCAmelCase ( self ) -> Tuple:
_A = []
for i in range(self.num_layers ):
_A = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_A = self.prev_output_channel if i == 0 else self.out_channels
_A = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase_ )
_A = resnets
if self.add_upsample:
_A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True ) -> Union[str, Any]:
for resnet in self.resnets:
# pop res hidden states
_A = res_hidden_states_tuple[-1]
_A = res_hidden_states_tuple[:-1]
_A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_A = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
if self.add_upsample:
_A = self.upsamplers_a(lowerCAmelCase_ )
return hidden_states
class a ( nn.Module ):
"""simple docstring"""
lowerCamelCase :int
lowerCamelCase :float = 0.0
lowerCamelCase :int = 1
lowerCamelCase :int = 1
lowerCamelCase :bool = False
lowerCamelCase :bool = False
lowerCamelCase :jnp.dtype = jnp.floataa
def UpperCAmelCase ( self ) -> Optional[Any]:
# there is always at least one resnet
_A = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
_A = []
for _ in range(self.num_layers ):
_A = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCAmelCase_ )
_A = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase_ )
_A = resnets
_A = attentions
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True ) -> List[str]:
_A = self.resnets[0](lowerCAmelCase_ , lowerCAmelCase_ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
_A = attn(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
_A = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
return hidden_states
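
# Note on the blocks above: each down block returns its per-resnet hidden states so the
# matching up block can pop them off res_hidden_states_tuple and jnp.concatenate them
# along the channel axis (axis=-1 in flax's NHWC layout) — the standard UNet
# skip-connection pattern.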
| 83
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool(PipelineTool):
    """Pipeline tool wrapping a BART summarization checkpoint."""

    default_checkpoint = 'philschmid/bart-large-cnn-samsum'
    description = (
        'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
        'and returns a summary of the text.'
    )
    name = 'summarizer'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ['text']
    outputs = ['text']

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
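
# Hedged usage sketch (assumes the transformers agents/tools runtime is available):
#
#   tool = TextSummarizationTool()
#   summary = tool("Long English text to condense ...")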
| 83
| 1
|
def bead_sort(sequence: list) -> list:
    """Bead (gravity) sort for lists of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
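
# Note: this list-based simulation of bead sort runs in O(n^2) and only supports
# non-negative integers; e.g. bead_sort([4, 0, 2]) == [0, 2, 4].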
| 6
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]

    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 590
| 0
|
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """Return the peak of a list that rises and then falls, by divide and conquer."""
    # middle index
    m = len(lst) // 2

    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]

    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]

    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])

    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
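
# Example (the input must rise to a single peak and then fall for the O(log n)
# divide-and-conquer recursion above to be valid):
#   peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) -> 5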
| 675
|
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
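
# Because the cache fixtures above are autouse, every test in the suite transparently
# reads and writes the HF datasets/metrics/modules caches under pytest's temporary
# base directory instead of the user's real cache in the home directory.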
| 675
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {'do_clean_text': False, 'add_prefix_space': False}

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
        # fmt: on
        emoji_tokens = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}}  # 😀
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['emoji_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        with open(self.emoji_file, 'w') as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
        output_text = 'こんにちは、世界。 \nこんばんは、世界。😀'
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def lowerCamelCase_ ( self ) -> Dict:
pass # TODO add if relevant
def lowerCamelCase_ ( self ) -> Dict:
pass # TODO add if relevant
def lowerCamelCase_ ( self ) -> Optional[int]:
pass # TODO add if relevant
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = self.get_tokenizer()
# Testing tokenization
_UpperCAmelCase = 'こんにちは、世界。 こんばんは、㔺界。'
_UpperCAmelCase = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
_UpperCAmelCase = tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
# Testing conversion to ids without special tokens
_UpperCAmelCase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(snake_case )
self.assertListEqual(snake_case , snake_case )
# Testing conversion to ids with special tokens
_UpperCAmelCase = tokens + [tokenizer.unk_token]
_UpperCAmelCase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(snake_case )
self.assertListEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = self.get_tokenizer()
# Testing tokenization
_UpperCAmelCase = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
_UpperCAmelCase = 'こんにちは、、、、世界。こんばんは、、、、世界。'
_UpperCAmelCase = tokenizer.encode(snake_case )
_UpperCAmelCase = tokenizer.decode(snake_case )
self.assertEqual(snake_case , snake_case )
@slow
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
_UpperCAmelCase = 'こんにちは、世界。'
_UpperCAmelCase = 'こんばんは、㔺界。😀'
_UpperCAmelCase = 'こんにちは、世界。こんばんは、世界。😀'
_UpperCAmelCase = tokenizer.encode(prefix_text + input_text )
_UpperCAmelCase = tokenizer.encode('' , prefix_text=prefix_text + input_text )
_UpperCAmelCase = tokenizer.encode(snake_case , prefix_text=snake_case )
_UpperCAmelCase = tokenizer.decode(snake_case )
_UpperCAmelCase = tokenizer.decode(snake_case )
_UpperCAmelCase = tokenizer.decode(snake_case )
self.assertEqual(snake_case , snake_case )
self.assertEqual(snake_case , snake_case )
self.assertEqual(snake_case , snake_case )
@slow
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
_UpperCAmelCase = 'こんにちは、世界。'
_UpperCAmelCase = 'こんばんは、㔺界。😀'
_UpperCAmelCase = len(tokenizer.encode(snake_case ) ) - 2
_UpperCAmelCase = len(tokenizer.encode(snake_case ) ) - 2
_UpperCAmelCase = [1] + [0] * (len_prefix + len_text + 1)
_UpperCAmelCase = [1] * (len_prefix + len_text + 1) + [0]
_UpperCAmelCase = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
_UpperCAmelCase = tokenizer(prefix_text + input_text ).token_type_ids
_UpperCAmelCase = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
_UpperCAmelCase = tokenizer(snake_case , prefix_text=snake_case ).token_type_ids
self.assertListEqual(snake_case , snake_case )
self.assertListEqual(snake_case , snake_case )
self.assertListEqual(snake_case , snake_case )
@slow
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
_UpperCAmelCase = tokenizer.encode('あンいワ' )
_UpperCAmelCase = tokenizer.encode('' , prefix_text='あンいワ' )
_UpperCAmelCase = tokenizer.encode('いワ' , prefix_text='あン' )
self.assertEqual(tokenizer.decode(snake_case ) , tokenizer.decode(snake_case ) )
self.assertEqual(tokenizer.decode(snake_case ) , tokenizer.decode(snake_case ) )
self.assertNotEqual(snake_case , snake_case )
self.assertNotEqual(snake_case , snake_case )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
_UpperCAmelCase = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
_UpperCAmelCase = tokenizer(snake_case , padding=snake_case )
_UpperCAmelCase = tokenizer.batch_encode_plus(snake_case , padding=snake_case )
# fmt: off
_UpperCAmelCase = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
_UpperCAmelCase = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
_UpperCAmelCase = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , snake_case )
self.assertListEqual(x_token.token_type_ids , snake_case )
self.assertListEqual(x_token.attention_mask , snake_case )
self.assertListEqual(x_token_a.input_ids , snake_case )
self.assertListEqual(x_token_a.token_type_ids , snake_case )
self.assertListEqual(x_token_a.attention_mask , snake_case )
def lowerCamelCase_ ( self ) -> List[Any]:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def lowerCamelCase_ ( self ) -> int:
# tokenizer has no padding token
pass
| 573
|
"""simple docstring"""
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """
    Solve the pair of linear equations a1*x + b1*y = c1 and a2*x + b2*y = c2
    using Cramer's rule.
    """
    if not len(equation1) == len(equation2) == 3:
        raise ValueError('Please enter a valid equation.')
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.')

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)')
        else:
            raise ValueError('No solution. (Inconsistent system)')
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
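
# Worked example: x + 2y = 3 and 2x + y = 3 give determinant = -3 and
# determinant_x = determinant_y = -3, so the lines intersect at (1.0, 1.0):
#   cramers_rule_2x2([1, 2, 3], [2, 1, 3]) -> (1.0, 1.0)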
| 573
| 1
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowerCamelCase__ ( UpperCAmelCase_):
"""simple docstring"""
def _a (self ):
'''simple docstring'''
lowerCamelCase = SMALL_MODEL_IDENTIFIER
lowerCamelCase = "pt"
lowerCamelCase = "tf"
def _a (self , __a ):
'''simple docstring'''
lowerCamelCase = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(__a )
def _a (self , __a ):
'''simple docstring'''
lowerCamelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=__a )
model_tf.save_pretrained(__a )
def _a (self ):
'''simple docstring'''
lowerCamelCase = "mock_framework"
# Framework provided - return whatever the user provides
lowerCamelCase = FeaturesManager.determine_framework(self.test_model , __a )
self.assertEqual(__a , __a )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__a )
lowerCamelCase = FeaturesManager.determine_framework(__a , __a )
self.assertEqual(__a , __a )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__a )
lowerCamelCase = FeaturesManager.determine_framework(__a , __a )
self.assertEqual(__a , __a )
def _a (self ):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__a )
lowerCamelCase = FeaturesManager.determine_framework(__a )
self.assertEqual(__a , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__a )
lowerCamelCase = FeaturesManager.determine_framework(__a )
self.assertEqual(__a , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__a ):
lowerCamelCase = FeaturesManager.determine_framework(__a )
def _a (self ):
'''simple docstring'''
lowerCamelCase = MagicMock(return_value=__a )
with patch("transformers.onnx.features.is_tf_available" , __a ):
lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
lowerCamelCase = MagicMock(return_value=__a )
with patch("transformers.onnx.features.is_torch_available" , __a ):
lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_tf )
# Both in environment -> use PyTorch
lowerCamelCase = MagicMock(return_value=__a )
lowerCamelCase = MagicMock(return_value=__a )
with patch("transformers.onnx.features.is_tf_available" , __a ), patch(
"transformers.onnx.features.is_torch_available" , __a ):
lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_pt )
# Both not in environment -> raise error
lowerCamelCase = MagicMock(return_value=__a )
lowerCamelCase = MagicMock(return_value=__a )
with patch("transformers.onnx.features.is_tf_available" , __a ), patch(
"transformers.onnx.features.is_torch_available" , __a ):
with self.assertRaises(__a ):
lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
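# For reference, a minimal sketch of the behaviour verified above, exercised
# outside the test harness (the model id is an assumed placeholder and the
# transformers.onnx API is legacy, so treat this as illustrative only):
#
#     from transformers.onnx import FeaturesManager
#     framework = FeaturesManager.determine_framework("hf-internal-testing/tiny-random-bert")
#     print(framework)  # "pt" when torch is installed, otherwise "tf"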
| 484
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class lowerCamelCase__ :
"""simple docstring"""
def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , __a=0 , ):
'''simple docstring'''
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = seq_length
lowerCamelCase = is_training
lowerCamelCase = use_input_mask
lowerCamelCase = use_token_type_ids
lowerCamelCase = use_labels
lowerCamelCase = vocab_size
lowerCamelCase = hidden_size
lowerCamelCase = num_hidden_layers
lowerCamelCase = num_attention_heads
lowerCamelCase = intermediate_size
lowerCamelCase = hidden_act
lowerCamelCase = hidden_dropout_prob
lowerCamelCase = attention_probs_dropout_prob
lowerCamelCase = max_position_embeddings
lowerCamelCase = type_vocab_size
lowerCamelCase = type_sequence_label_size
lowerCamelCase = initializer_range
lowerCamelCase = num_labels
lowerCamelCase = num_choices
lowerCamelCase = scope
lowerCamelCase = projection_dim
def _a (self ):
'''simple docstring'''
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase = None
if self.use_token_type_ids:
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = None
if self.use_labels:
lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
lowerCamelCase = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a (self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
lowerCamelCase = TFDPRContextEncoder(config=__a )
lowerCamelCase = model(__a , attention_mask=__a , token_type_ids=__a )
lowerCamelCase = model(__a , token_type_ids=__a )
lowerCamelCase = model(__a )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def _a (self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
lowerCamelCase = TFDPRQuestionEncoder(config=__a )
lowerCamelCase = model(__a , attention_mask=__a , token_type_ids=__a )
lowerCamelCase = model(__a , token_type_ids=__a )
lowerCamelCase = model(__a )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def _a (self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
lowerCamelCase = TFDPRReader(config=__a )
lowerCamelCase = model(__a , attention_mask=__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def _a (self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
return config, inputs_dict
@require_tf
class lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase):
"""simple docstring"""
_A = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
_A = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
_A = False
_A = False
_A = False
_A = False
_A = False
def _a (self ):
'''simple docstring'''
lowerCamelCase = TFDPRModelTester(self )
lowerCamelCase = ConfigTester(self , config_class=__a , hidden_size=37 )
def _a (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a (self ):
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__a )
def _a (self ):
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__a )
def _a (self ):
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__a )
@slow
def _a (self ):
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase = TFDPRContextEncoder.from_pretrained(__a )
self.assertIsNotNone(__a )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase = TFDPRQuestionEncoder.from_pretrained(__a )
self.assertIsNotNone(__a )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase = TFDPRReader.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_tf
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
@slow
def _a (self ):
'''simple docstring'''
lowerCamelCase = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base" )
lowerCamelCase = tf.constant(
[[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]] ) # [CLS] hello, is my dog cute? [SEP]
lowerCamelCase = model(__a )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
lowerCamelCase = tf.constant(
[
[
0.0323_6253,
0.1275_3335,
0.1681_8509,
0.0027_9786,
0.389_6933,
0.2426_4945,
0.217_8971,
-0.0233_5227,
-0.0848_1959,
-0.1432_4117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 484
| 1
|
import re
from filelock import FileLock
try:
import nltk
UpperCamelCase__ = True
except (ImportError, ModuleNotFoundError):
UpperCamelCase__ = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def lowerCAmelCase_ ( text : str ) -> str:
    '''Re-split the text so that each sentence sits on its own line.'''
    text = re.sub("<n>", "", text )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(text ) )
| 486
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = (3_2, 3_2)
SCREAMING_SNAKE_CASE = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(snake_case__ )
return image
@property
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
return model
@property
def UpperCamelCase ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(snake_case__ )
@property
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
def extract(*snake_case__ : List[Any] , **snake_case__ : Union[str, Any] ):
class UpperCamelCase :
def __init__( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.ones([0] )
def UpperCamelCase ( self : Any , snake_case__ : List[str] ):
"""simple docstring"""
self.pixel_values.to(snake_case__ )
return self
return Out()
return extract
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.dummy_cond_unet
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = self.dummy_vae
SCREAMING_SNAKE_CASE = self.dummy_text_encoder
SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
SCREAMING_SNAKE_CASE = 7_7
SCREAMING_SNAKE_CASE = self.dummy_image.to(snake_case__ )
SCREAMING_SNAKE_CASE = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE = AltDiffusionImgaImgPipeline(
unet=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , safety_checker=snake_case__ , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=snake_case__ )
SCREAMING_SNAKE_CASE = alt_pipe.to(snake_case__ )
alt_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE = alt_pipe(
[prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=snake_case__ , )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE = alt_pipe(
[prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=snake_case__ , return_dict=snake_case__ , )[0]
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
SCREAMING_SNAKE_CASE = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.dummy_cond_unet
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = self.dummy_vae
SCREAMING_SNAKE_CASE = self.dummy_text_encoder
SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
SCREAMING_SNAKE_CASE = 7_7
SCREAMING_SNAKE_CASE = self.dummy_image.to(snake_case__ )
# put models in fp16
SCREAMING_SNAKE_CASE = unet.half()
SCREAMING_SNAKE_CASE = vae.half()
SCREAMING_SNAKE_CASE = bert.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE = AltDiffusionImgaImgPipeline(
unet=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , safety_checker=snake_case__ , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=snake_case__ )
SCREAMING_SNAKE_CASE = alt_pipe.to(snake_case__ )
alt_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = alt_pipe(
[prompt] , generator=snake_case__ , num_inference_steps=2 , output_type='np' , image=snake_case__ , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
# resize to resolution that is divisible by 8 but not 16 or 32
SCREAMING_SNAKE_CASE = init_image.resize((7_6_0, 5_0_4) )
SCREAMING_SNAKE_CASE = 'BAAI/AltDiffusion'
SCREAMING_SNAKE_CASE = AltDiffusionImgaImgPipeline.from_pretrained(
snake_case__ , safety_checker=snake_case__ , )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE = 'A fantasy landscape, trending on artstation'
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(
prompt=snake_case__ , image=snake_case__ , strength=0.75 , guidance_scale=7.5 , generator=snake_case__ , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images[0]
SCREAMING_SNAKE_CASE = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
SCREAMING_SNAKE_CASE = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
SCREAMING_SNAKE_CASE = init_image.resize((7_6_8, 5_1_2) )
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
SCREAMING_SNAKE_CASE = 'BAAI/AltDiffusion'
SCREAMING_SNAKE_CASE = AltDiffusionImgaImgPipeline.from_pretrained(
snake_case__ , safety_checker=snake_case__ , )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE = 'A fantasy landscape, trending on artstation'
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(
prompt=snake_case__ , image=snake_case__ , strength=0.75 , guidance_scale=7.5 , generator=snake_case__ , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 439
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class lowerCAmelCase_ ( _UpperCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : List[str] = '''biogpt'''
    def __init__( self , vocab_size=4_23_84 , hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=40_96 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=10_24 , initializer_range=0.02 , layer_norm_eps=1E-12 , scale_embedding=True , use_cache=True , layerdrop=0.0 , activation_dropout=0.0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 705
|
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
SCREAMING_SNAKE_CASE__ = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowerCAmelCase__ ( _UpperCamelCase : int ) -> Any:
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowerCAmelCase__ ( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] ) -> int:
"""simple docstring"""
if args.student_type == "roberta":
snake_case = False
elif args.student_type == "gpt2":
snake_case = False
def lowerCAmelCase__ ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict ) -> Tuple:
"""simple docstring"""
if args.student_type == "roberta":
snake_case = False
def lowerCAmelCase__ ( ) -> Optional[int]:
"""simple docstring"""
snake_case = argparse.ArgumentParser(description='Training' )
parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' )
parser.add_argument(
'--dump_path' , type=_UpperCamelCase , required=_UpperCamelCase , help='The output directory (log, checkpoints, parameters, etc.)' )
parser.add_argument(
'--data_file' , type=_UpperCamelCase , required=_UpperCamelCase , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , )
parser.add_argument(
'--student_type' , type=_UpperCamelCase , choices=['distilbert', 'roberta', 'gpt2'] , required=_UpperCamelCase , help='The student type (DistilBERT, RoBERTa).' , )
parser.add_argument('--student_config' , type=_UpperCamelCase , required=_UpperCamelCase , help='Path to the student configuration.' )
parser.add_argument(
'--student_pretrained_weights' , default=_UpperCamelCase , type=_UpperCamelCase , help='Load student initialization checkpoint.' )
parser.add_argument(
'--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=_UpperCamelCase , help='Teacher type (BERT, RoBERTa).' )
parser.add_argument('--teacher_name' , type=_UpperCamelCase , required=_UpperCamelCase , help='The teacher model.' )
parser.add_argument('--temperature' , default=2.0 , type=_UpperCamelCase , help='Temperature for the softmax temperature.' )
parser.add_argument(
'--alpha_ce' , default=0.5 , type=_UpperCamelCase , help='Linear weight for the distillation loss. Must be >=0.' )
parser.add_argument(
'--alpha_mlm' , default=0.0 , type=_UpperCamelCase , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , )
parser.add_argument('--alpha_clm' , default=0.5 , type=_UpperCamelCase , help='Linear weight for the CLM loss. Must be >=0.' )
parser.add_argument('--alpha_mse' , default=0.0 , type=_UpperCamelCase , help='Linear weight of the MSE loss. Must be >=0.' )
parser.add_argument(
'--alpha_cos' , default=0.0 , type=_UpperCamelCase , help='Linear weight of the cosine embedding loss. Must be >=0.' )
parser.add_argument(
'--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' )
parser.add_argument(
'--mlm_mask_prop' , default=0.15 , type=_UpperCamelCase , help='Proportion of tokens for which we need to make a prediction.' , )
parser.add_argument('--word_mask' , default=0.8 , type=_UpperCamelCase , help='Proportion of tokens to mask out.' )
parser.add_argument('--word_keep' , default=0.1 , type=_UpperCamelCase , help='Proportion of tokens to keep.' )
parser.add_argument('--word_rand' , default=0.1 , type=_UpperCamelCase , help='Proportion of tokens to randomly replace.' )
parser.add_argument(
'--mlm_smoothing' , default=0.7 , type=_UpperCamelCase , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , )
parser.add_argument('--token_counts' , type=_UpperCamelCase , help='The token counts in the data_file for MLM.' )
parser.add_argument(
'--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , )
parser.add_argument(
'--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , )
parser.add_argument(
'--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , )
parser.add_argument('--n_epoch' , type=_UpperCamelCase , default=3 , help='Number of pass on the whole dataset.' )
parser.add_argument('--batch_size' , type=_UpperCamelCase , default=5 , help='Batch size (for each process).' )
parser.add_argument(
'--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , )
parser.add_argument(
'--gradient_accumulation_steps' , type=_UpperCamelCase , default=5_0 , help='Gradient accumulation for larger training batches.' , )
parser.add_argument('--warmup_prop' , default=0.05 , type=_UpperCamelCase , help='Linear warmup proportion.' )
parser.add_argument('--weight_decay' , default=0.0 , type=_UpperCamelCase , help='Weight decay if we apply some.' )
parser.add_argument('--learning_rate' , default=5e-4 , type=_UpperCamelCase , help='The initial learning rate for Adam.' )
parser.add_argument('--adam_epsilon' , default=1e-6 , type=_UpperCamelCase , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , default=5.0 , type=_UpperCamelCase , help='Max gradient norm.' )
parser.add_argument('--initializer_range' , default=0.02 , type=_UpperCamelCase , help='Random initialization range.' )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=_UpperCamelCase , default='O1' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_gpu' , type=_UpperCamelCase , default=1 , help='Number of GPUs in the node.' )
parser.add_argument('--local_rank' , type=_UpperCamelCase , default=-1 , help='Distributed training - Local rank' )
parser.add_argument('--seed' , type=_UpperCamelCase , default=5_6 , help='Random seed' )
parser.add_argument('--log_interval' , type=_UpperCamelCase , default=5_0_0 , help='Tensorboard logging interval.' )
parser.add_argument('--checkpoint_interval' , type=_UpperCamelCase , default=4_0_0_0 , help='Checkpoint interval.' )
snake_case = parser.parse_args()
sanity_checks(_UpperCamelCase )
# ARGS #
init_gpu_params(_UpperCamelCase )
set_seed(_UpperCamelCase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
' itUse `--force` if you want to overwrite it' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f:
json.dump(vars(_UpperCamelCase ) , _UpperCamelCase , indent=4 )
git_log(args.dump_path )
snake_case ,snake_case ,snake_case = MODEL_CLASSES[args.student_type]
snake_case ,snake_case ,snake_case = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
snake_case = teacher_tokenizer_class.from_pretrained(args.teacher_name )
snake_case = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
snake_case = tokenizer.all_special_tokens.index(_UpperCamelCase )
snake_case = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
snake_case = special_tok_ids
snake_case = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file , 'rb' ) as fp:
snake_case = pickle.load(_UpperCamelCase )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , 'rb' ) as fp:
snake_case = pickle.load(_UpperCamelCase )
snake_case = np.maximum(_UpperCamelCase , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
snake_case = 0.0 # do not predict special tokens
snake_case = torch.from_numpy(_UpperCamelCase )
else:
snake_case = None
snake_case = LmSeqsDataset(params=_UpperCamelCase , data=_UpperCamelCase )
logger.info('Data loader created.' )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
snake_case = student_config_class.from_pretrained(args.student_config )
snake_case = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
snake_case = student_model_class.from_pretrained(args.student_pretrained_weights , config=_UpperCamelCase )
else:
snake_case = student_model_class(_UpperCamelCase )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info('Student loaded.' )
# TEACHER #
snake_case = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_UpperCamelCase )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(_UpperCamelCase , _UpperCamelCase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(_UpperCamelCase , _UpperCamelCase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
snake_case = Distiller(
params=_UpperCamelCase , dataset=_UpperCamelCase , token_probs=_UpperCamelCase , student=_UpperCamelCase , teacher=_UpperCamelCase )
distiller.train()
logger.info('Let\'s go get some drinks.' )
if __name__ == "__main__":
main()
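# Illustrative invocation of this script (every path, model name, and value
# below is a placeholder assumption, not taken from the source; the flags
# themselves are the ones defined by the argument parser above):
#
#     python train.py --student_type distilbert \
#         --student_config training_configs/distilbert-base-uncased.json \
#         --teacher_type bert --teacher_name bert-base-uncased \
#         --mlm --alpha_mlm 0.5 --alpha_ce 0.5 --alpha_clm 0.0 \
#         --token_counts token_counts.pkl --data_file binarized_text.pkl \
#         --dump_path serialization_dir/my_first_distillation --force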
| 104
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :int = KandinskyVaaControlnetImgaImgPipeline
__magic_name__ :Tuple = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
__magic_name__ :List[Any] = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
__magic_name__ :List[Any] = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__magic_name__ :Any = False
@property
def snake_case ( self ):
'''simple docstring'''
return 3_2
@property
def snake_case ( self ):
'''simple docstring'''
return 3_2
@property
def snake_case ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def snake_case ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case ( self ):
'''simple docstring'''
return 1_0_0
@property
def snake_case ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ :Union[str, Any] = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCAmelCase__ :int = UNetaDConditionModel(**__UpperCAmelCase )
return model
@property
def snake_case ( self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def snake_case ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ :Any = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = self.dummy_unet
lowerCAmelCase__ :List[str] = self.dummy_movq
lowerCAmelCase__ :List[Any] = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.0_00_85,
'beta_end': 0.0_12,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCAmelCase__ :Union[str, Any] = DDIMScheduler(**__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
'''simple docstring'''
lowerCAmelCase__ :Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
lowerCAmelCase__ :str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__UpperCAmelCase )
# create init_image
lowerCAmelCase__ :int = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ :Optional[int] = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create hint
lowerCAmelCase__ :str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
if str(__UpperCAmelCase ).startswith('mps' ):
lowerCAmelCase__ :Optional[int] = torch.manual_seed(__UpperCAmelCase )
else:
lowerCAmelCase__ :Tuple = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
lowerCAmelCase__ :str = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = 'cpu'
lowerCAmelCase__ :Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ :str = self.pipeline_class(**__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ :Dict = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
lowerCAmelCase__ :List[str] = output.images
lowerCAmelCase__ :Optional[Any] = pipe(
**self.get_dummy_inputs(__UpperCAmelCase ) , return_dict=__UpperCAmelCase , )[0]
lowerCAmelCase__ :Any = image[0, -3:, -3:, -1]
lowerCAmelCase__ :Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCAmelCase__ :List[Any] = np.array(
[0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowerCAmelCase__ :Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCAmelCase__ :Dict = init_image.resize((5_1_2, 5_1_2) )
lowerCAmelCase__ :int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowerCAmelCase__ :Tuple = torch.from_numpy(np.array(__UpperCAmelCase ) ).float() / 2_55.0
lowerCAmelCase__ :Union[str, Any] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
lowerCAmelCase__ :List[Any] = 'A robot, 4k photo'
lowerCAmelCase__ :int = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa )
lowerCAmelCase__ :int = pipeline.to(__UpperCAmelCase )
pipeline.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ :Tuple = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ , lowerCAmelCase__ :Any = pipe_prior(
__UpperCAmelCase , image=__UpperCAmelCase , strength=0.85 , generator=__UpperCAmelCase , negative_prompt='' , ).to_tuple()
lowerCAmelCase__ :int = pipeline(
image=__UpperCAmelCase , image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , hint=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=1_0_0 , height=5_1_2 , width=5_1_2 , strength=0.5 , output_type='np' , )
lowerCAmelCase__ :Any = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
| 93
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""MLukeTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 93
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class lowerCAmelCase__ ( __A ):
"""simple docstring"""
__UpperCAmelCase : List[str] = '''swinv2'''
__UpperCAmelCase : int = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , encoder_stride=32 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 708
|
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.602_176_634E-19,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.35_58_18,
}
def __magic_name__ ( from_type , to_type , value ):
    '''Convert an energy value between the units listed in ENERGY_CONVERSION.'''
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            F"""Valid values are: {', '.join(ENERGY_CONVERSION)}"""
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
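# A couple of hedged, illustrative conversions; the expected results follow
# directly from the factor table above:
#
#     __magic_name__("joule", "kilojoule", 1000)   # -> 1.0
#     __magic_name__("kilowatthour", "joule", 1)   # -> 3600000.0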
| 73
| 0
|
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : List[str] = XLNetTokenizer
A__ : str = XLNetTokenizerFast
A__ : int = True
A__ : List[str] = True
def _a ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
A__ = XLNetTokenizer(_snake_case , keep_accents=_snake_case )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def _a ( self : Any ):
"""simple docstring"""
A__ = '<s>'
A__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) , _snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) , _snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
A__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<eod>' )
self.assertEqual(len(_snake_case ) , 10_06 )
def _a ( self : Dict ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = XLNetTokenizer(_snake_case , keep_accents=_snake_case )
A__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(_snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , [2_85, 46, 10, 1_70, 3_82] )
A__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
A__ = tokenizer.convert_tokens_to_ids(_snake_case )
self.assertListEqual(_snake_case , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
A__ = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = XLNetTokenizer(_snake_case , do_lower_case=_snake_case )
A__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + '',
'i',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['▁he', 'll', 'o'] )
def _a ( self : Tuple ):
"""simple docstring"""
A__ = XLNetTokenizer(_snake_case , do_lower_case=_snake_case )
A__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
@slow
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = XLNetTokenizer.from_pretrained('xlnet-base-cased' )
A__ = tokenizer.encode('sequence builders' , add_special_tokens=_snake_case )
A__ = tokenizer.encode('multi-sequence build' , add_special_tokens=_snake_case )
A__ = tokenizer.build_inputs_with_special_tokens(_snake_case )
A__ = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def _a ( self : str ):
"""simple docstring"""
A__ = {'input_ids': [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='xlnet-base-cased' , revision='c841166438c31ec7ca9a106dee7bb312b73ae511' , )
| 9
|
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Optional[Any] = ["input_values", "attention_mask"]
def __init__( self : str , _snake_case : int = 1 , _snake_case : int = 1_60_00 , _snake_case : float = 0.0 , _snake_case : bool = False , _snake_case : int = 80 , _snake_case : int = 16 , _snake_case : int = 64 , _snake_case : str = "hann_window" , _snake_case : float = 1.0 , _snake_case : float = 80 , _snake_case : float = 76_00 , _snake_case : float = 1E-10 , _snake_case : int = 2 , _snake_case : bool = True , **_snake_case : Union[str, Any] , ):
"""simple docstring"""
super().__init__(feature_size=_snake_case , sampling_rate=_snake_case , padding_value=_snake_case , **_snake_case )
A__ = do_normalize
A__ = return_attention_mask
A__ = num_mel_bins
A__ = hop_length
A__ = win_length
A__ = win_function
A__ = frame_signal_scale
A__ = fmin
A__ = fmax
A__ = mel_floor
A__ = reduction_factor
A__ = win_length * sampling_rate // 10_00
A__ = hop_length * sampling_rate // 10_00
A__ = optimal_fft_length(self.sample_size )
A__ = (self.n_fft // 2) + 1
A__ = window_function(window_length=self.sample_size , name=self.win_function , periodic=_snake_case )
A__ = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _snake_case , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _snake_case , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _a ( _snake_case : List[np.ndarray] , _snake_case : List[np.ndarray] , _snake_case : float = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
A__ = np.array(_snake_case , np.intaa )
A__ = []
for vector, length in zip(_snake_case , attention_mask.sum(-1 ) ):
A__ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
A__ = padding_value
normed_input_values.append(_snake_case )
else:
A__ = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def _a ( self : Tuple , _snake_case : np.ndarray , ):
"""simple docstring"""
A__ = spectrogram(
_snake_case , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
def __call__( self : List[str] , _snake_case : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _snake_case : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Optional[int] = None , _snake_case : bool = False , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[Union[str, TensorType]] = None , _snake_case : Optional[int] = None , **_snake_case : Tuple , ):
"""simple docstring"""
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if audio is not None:
A__ = self._process_audio(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , **_snake_case , )
else:
A__ = None
if audio_target is not None:
A__ = self._process_audio(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , **_snake_case , )
if inputs is None:
return inputs_target
else:
A__ = inputs_target['input_values']
A__ = inputs_target.get('attention_mask' )
if decoder_attention_mask is not None:
A__ = decoder_attention_mask
return inputs
def _a ( self : Tuple , _snake_case : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _snake_case : bool = False , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Optional[int] = None , _snake_case : bool = False , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[Union[str, TensorType]] = None , **_snake_case : Tuple , ):
"""simple docstring"""
A__ = isinstance(_snake_case , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
A__ = is_batched_numpy or (
isinstance(_snake_case , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A__ = [np.asarray(_snake_case , dtype=np.float32 ) for speech in speech]
elif not is_batched and not isinstance(_snake_case , np.ndarray ):
A__ = np.asarray(_snake_case , dtype=np.float32 )
elif isinstance(_snake_case , np.ndarray ) and speech.dtype is np.dtype(np.float64 ):
A__ = speech.astype(np.float32 )
# always return batch
if not is_batched:
A__ = [speech]
# needed to make pad() work on spectrogram inputs
A__ = self.feature_size
# convert into correct format for padding
if is_target:
A__ = [self._extract_mel_features(_snake_case ) for waveform in speech]
A__ = BatchFeature({'input_values': features} )
A__ = self.num_mel_bins
else:
A__ = BatchFeature({'input_values': speech} )
A__ = self.pad(
_snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case , pad_to_multiple_of=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
A__ = feature_size_hack
# convert input values to correct format
A__ = padded_inputs['input_values']
if not isinstance(input_values[0] , np.ndarray ):
A__ = [np.asarray(_snake_case , dtype=np.float32 ) for array in input_values]
elif (
not isinstance(_snake_case , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.float64 )
):
A__ = [array.astype(np.float32 ) for array in input_values]
elif isinstance(_snake_case , np.ndarray ) and input_values.dtype is np.dtype(np.float64 ):
A__ = input_values.astype(np.float32 )
# convert attention_mask to correct format
A__ = padded_inputs.get('attention_mask' )
if attention_mask is not None:
A__ = [np.asarray(_snake_case , dtype=np.int32 ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
A__ = (
attention_mask
if self._get_padding_strategies(_snake_case , max_length=_snake_case ) is not PaddingStrategy.DO_NOT_PAD
else None
)
A__ = self.zero_mean_unit_var_norm(
padded_inputs['input_values'] , attention_mask=_snake_case , padding_value=self.padding_value )
if return_tensors is not None:
A__ = padded_inputs.convert_to_tensors(_snake_case )
return padded_inputs
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = super().to_dict()
# Don't serialize these as they are derived from the other properties.
A__ = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
for name in names:
if name in output:
del output[name]
return output
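# A self-contained sketch of the zero_mean_unit_var_norm logic used above
# (illustrative standalone version, not the library API): statistics are
# computed over the un-padded region only, and padded positions are
# overwritten with `padding_value`.
import numpy as np

def _zero_mean_unit_var_norm_sketch(input_values, attention_mask=None, padding_value=0.0):
    if attention_mask is None:
        return [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
    normed = []
    for vector, length in zip(input_values, attention_mask.sum(-1)):
        normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
        normed_slice[length:] = padding_value
        normed.append(normed_slice)
    return normed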
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level():
    """simple docstring"""
    env_level_str = os.getenv('DATASETS_VERBOSITY' , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
                f"""has to be one of: { ", ".join(log_levels.keys() ) }""" )
    return _default_log_level
def _get_library_name():
    """simple docstring"""
    return __name__.split('.' )[0]
def _get_library_root_logger():
    """simple docstring"""
    return logging.getLogger(_get_library_name() )
def _configure_library_root_logger():
    """simple docstring"""
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level() )
def _reset_library_root_logger():
    """simple docstring"""
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET )
def get_logger(name = None ):
    """simple docstring"""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name )
def get_verbosity():
    """simple docstring"""
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity ):
    """simple docstring"""
    _get_library_root_logger().setLevel(verbosity )
def set_verbosity_debug():
    """simple docstring"""
    return set_verbosity(logging.DEBUG )
def set_verbosity_info():
    """simple docstring"""
    return set_verbosity(logging.INFO )
def set_verbosity_warning():
    """simple docstring"""
    return set_verbosity(logging.WARNING )
def set_verbosity_error():
    """simple docstring"""
    return set_verbosity(logging.ERROR )
def disable_propagation():
    """simple docstring"""
    _get_library_root_logger().propagate = False
def enable_propagation():
    """simple docstring"""
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    '''simple docstring'''
    def __init__( self , *args , **kwargs ): # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
    def __iter__( self ):
        return iter(self._iterator )
    def __getattr__( self , _name ):
        def empty_fn(*args , **kwargs ): # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__( self ):
        return self
    def __exit__( self , type_ , value , traceback ):
        return
_tqdm_active = True
class _tqdm_cls:
    '''simple docstring'''
    def __call__( self , *args , disable=False , **kwargs ):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def set_lock( self , *args , **kwargs ):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock( self ):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled():
    """simple docstring"""
    global _tqdm_active
    return bool(_tqdm_active )
def enable_progress_bar():
    """simple docstring"""
    global _tqdm_active
    _tqdm_active = True
def disable_progress_bar():
    """simple docstring"""
    global _tqdm_active
    _tqdm_active = False
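# Usage sketch for the no-op progress bar above: when bars are disabled,
# EmptyTqdm absorbs arbitrary method calls via __getattr__, so calling code
# never has to branch on whether tqdm is active.
_demo_bar = EmptyTqdm(range(3))
_demo_bar.set_description('ignored')   # silently does nothing
assert list(_demo_bar) == [0, 1, 2]    # still iterates the wrapped iterable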
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 48_000,
'sample_size': 65_536,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 48_000,
'sample_size': 65_536,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 48_000,
'sample_size': 131_072,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
}
def alpha_sigma_to_t(alpha , sigma ):
    """simple docstring"""
    return torch.atan2(alpha , sigma ) / math.pi * 2
def get_crash_schedule(t ):
    """simple docstring"""
    sigma = torch.sin(t * math.pi / 2 ) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha , sigma )
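# Hand-checkable values for the crash schedule above: t = 0 -> 1, t = 0.5 -> 2/3,
# t = 1 -> 0 (sigma = sin(pi*t/2)^2, alpha = sqrt(1 - sigma^2), then atan2/pi*2),
# so the schedule runs "backwards" from 1 down to 0.
assert torch.allclose(
    get_crash_schedule(torch.tensor([0.0, 0.5, 1.0])),
    torch.tensor([1.0, 2 / 3, 0.0]),
    atol=1e-4,
)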
class Object(object ):
    '''simple docstring'''
    pass
class DiffusionUncond(nn.Module ):
    '''simple docstring'''
    def __init__( self , global_args ):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args , n_attn_layers=4 )
        self.diffusion_ema = deepcopy(self.diffusion )
        self.rng = torch.quasirandom.SobolEngine(1 , scramble=True )
def download(model_name ):
    """simple docstring"""
    url = MODELS_MAP[model_name]['url']
    os.system(f"""wget {url} ./""" )
    return f"""./{model_name}.ckpt"""
DOWN_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
UP_NUM_TO_LAYER = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
MID_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
DEPTH_0_TO_LAYER = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
RES_CONV_MAP = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
ATTN_MAP = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def convert_resconv_naming(name ):
    """simple docstring"""
    if name.startswith('skip' ):
        return name.replace('skip' , RES_CONV_MAP['skip'] )
    # name has to be of format main.{digit}
    if not name.startswith('main.' ):
        raise ValueError(f"""ResConvBlock error with {name}""" )
    return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming(name ):
    """simple docstring"""
    for key, value in ATTN_MAP.items():
        if name.startswith(key ) and not isinstance(value , list ):
            return name.replace(key , value )
        elif name.startswith(key ):
            return [name.replace(key , v ) for v in value]
    raise ValueError(f"""Attn error with {name}""" )
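# Examples of the two renaming helpers above on illustrative keys: scalar map
# entries are straight substitutions, while the fused qkv projection fans out
# into three target keys.
assert convert_resconv_naming('skip') == 'conv_skip'
assert convert_resconv_naming('main.0.weight') == 'conv_1.weight'
assert convert_attn_naming('norm.weight') == 'group_norm.weight'
assert convert_attn_naming('qkv_proj.weight') == ['query.weight', 'key.weight', 'value.weight']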
def rename(input_string , max_depth=13 ):
    """simple docstring"""
    string = input_string
    if string.split('.' )[0] == "timestep_embed":
        return string.replace('timestep_embed' , 'time_proj' )
    depth = 0
    if string.startswith('net.3.' ):
        depth += 1
        string = string[6:]
    elif string.startswith('net.' ):
        string = string[4:]
    while string.startswith('main.7.' ):
        depth += 1
        string = string[7:]
    if string.startswith('main.' ):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = 'mid_block'
    elif depth > 0 and int(layer_num ) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"""down_blocks.{depth}"""
    elif depth > 0 and int(layer_num ) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"""up_blocks.{max_depth - depth - 1}"""
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"""up_blocks.{max_depth - 1}""" if int(layer_num ) > 3 else 'down_blocks.0'
    if not string_left.startswith('.' ):
        raise ValueError(f"""Naming error with {input_string} and string_left: {string_left}.""" )
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left )
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left )
    if not isinstance(string_left , list ):
        new_string = prefix + '.' + new_layer + '.' + string_left
    else:
        new_string = [prefix + '.' + new_layer + '.' + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict ):
    """simple docstring"""
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith('kernel' ):
            # up- and downsample layers don't have trainable weights
            continue
        new_k = rename(k )
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k , list ):
            new_state_dict = transform_conv_attns(new_state_dict , new_k , v )
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict , new_k , v ):
    """simple docstring"""
    if len(new_k ) == 1:
        if len(v.shape ) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        tripled_shape = v.shape[0]
        single_shape = tripled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
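# Shape sketch for the qkv split above (illustrative sizes): a fused attention
# Conv1d kernel of shape (3*C, C, 1) is sliced into three (C, C) linear weights.
_v_demo = torch.randn(3 * 8, 8, 1)
_single = _v_demo.shape[0] // 3
_q_demo = _v_demo[0 * _single : 1 * _single, :, 0]
assert _q_demo.shape == (8, 8)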
def main(args ):
    """simple docstring"""
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
    model_name = args.model_path.split('/' )[-1].split('.' )[0]
    if not os.path.isfile(args.model_path ):
        assert (
            model_name == args.model_path
        ), f"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
        args.model_path = download(model_name )
    sample_rate = MODELS_MAP[model_name]['sample_rate']
    sample_size = MODELS_MAP[model_name]['sample_size']
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNet1DModel(sample_size=sample_size , sample_rate=sample_rate )
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config )
    orig_model.load_state_dict(torch.load(args.model_path , map_location=device )['state_dict'] )
    diffusion_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = diffusion_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict )
    renamed_minus_diffusers = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    diffusers_minus_renamed = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
    assert len(renamed_minus_diffusers ) == 0, f"""Problem with {renamed_minus_diffusers}"""
    assert all(k.endswith('kernel' ) for k in list(diffusers_minus_renamed ) ), f"""Problem with {diffusers_minus_renamed}"""
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict )
    steps = 100
    seed = 33
    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps )
    generator = torch.manual_seed(seed )
    noise = torch.randn([1, 2, config.sample_size] , generator=generator ).to(device )
    t = torch.linspace(1 , 0 , steps + 1 , device=device )[:-1]
    step_list = get_crash_schedule(t )
    pipe = DanceDiffusionPipeline(unet=diffusers_model , scheduler=diffusers_scheduler )
    generator = torch.manual_seed(33 )
    audio = pipe(num_inference_steps=steps , generator=generator ).audios
    generated = sampling.iplms_sample(diffusion_model , noise , step_list , {} )
    generated = generated.clamp(-1 , 1 )
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path )
    print('Diff sum' , diff_sum )
    print('Diff max' , diff_max )
    assert diff_max < 1E-3, f"""Diff max: {diff_max} is too much :-/"""
    print(f"""Conversion for {model_name} successful!""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
args = parser.parse_args()
main(args)
def solution(length: int = 50 ) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
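# Hand-checkable example: a row of length 5 admits 15 fillings. Decomposing by
# the leftmost tile gives 1 (all black) + 8 (first tile of length 2) + 4
# (first tile of length 3) + 2 (first tile of length 4).
assert solution(5) == 15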
if __name__ == "__main__":
print(f"{solution() = }")
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig ):
    model_type = "levit"
    def __init__( self , image_size=2_24 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs( self ):
        """simple docstring"""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def atol_for_validation( self ):
        """simple docstring"""
        return 1e-4
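# Minimal usage sketch: instantiating the configuration with the defaults
# defined above.
_cfg_demo = LevitConfig()
assert _cfg_demo.model_type == 'levit'
assert _cfg_demo.hidden_sizes == [128, 256, 384]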
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase_ = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
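# The _LazyModule above defers the heavy torch/TF imports until first attribute
# access; a sketch of the observable behaviour (assuming transformers is
# installed):
#
#   import transformers.models.ctrl as ctrl   # cheap: only the import structure is read
#   ctrl.CTRLTokenizer                        # first access triggers the real submodule import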
def prime_sieve_eratosthenes(num ) ->list[int]:
    """simple docstring"""
    if num <= 0:
        raise ValueError('''Input must be a positive integer''' )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1 ) if primes[prime]]
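# Example: primes up to 10.
assert prime_sieve_eratosthenes(10) == [2, 3, 5, 7]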
if __name__ == "__main__":
import doctest
doctest.testmod()
user_num = int(input('''Enter a positive integer: ''').strip())
print(prime_sieve_eratosthenes(user_num))
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params , i , prefix ) -> int:
'''simple docstring'''
return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def tax_attention_lookup(params , i , prefix , layer_name="attention" ) -> Optional[int]:
'''simple docstring'''
lowercase_ :str = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :] )
lowercase_ :List[str] = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
lowercase_ :Tuple = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :] )
lowercase_ :Any = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
lowercase_ :Any = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :] )
lowercase_ :int = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
lowercase_ :Dict = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :] )
lowercase_ :str = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
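# Shape sketch for the attention lookup above (illustrative sizes): T5X stacks
# per-layer kernels on axis 1, so a key kernel of shape
# (d_model, n_layers, n_heads, d_head) is sliced at layer i and flattened to
# (d_model, n_heads * d_head).
_k_stacked = np.zeros((512, 8, 6, 64))
_k_tmp = np.ascontiguousarray(_k_stacked[:, 3, :, :])
_k_flat = _k_tmp.reshape(_k_tmp.shape[0], _k_tmp.shape[1] * _k_tmp.shape[2])
assert _k_flat.shape == (512, 384)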
def tax_mlp_lookup(params , i , prefix , split_mlp_wi=False ) -> int:
'''simple docstring'''
if split_mlp_wi:
lowercase_ :Dict = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
lowercase_ :Optional[Any] = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
lowercase_ :int = (wi_a, wi_a)
else:
lowercase_ :Optional[Any] = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
lowercase_ :Optional[int] = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
return wi, wo
def tax_layer_norm_lookup(params , i , prefix , layer_name ) -> List[Any]:
'''simple docstring'''
return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_tax_to_pytorch(variables , *, num_layers , is_encoder_only , scalable_attention = False ) -> Optional[Any]:
    '''simple docstring'''
    old = traverse_util.flatten_dict(variables['''target'''] )
    old = {'''/'''.join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = '''encoder/encoder/mlp/wi_0/kernel''' in old
    print('''Split MLP:''' , split_mlp_wi )
    new = collections.OrderedDict()
# Shared embeddings.
lowercase_ :List[Any] = old['''token_embedder/embedding''']
# Encoder.
for i in range(A_ ):
# Block i, layer 0 (Self Attention).
lowercase_ :Optional[Any] = tax_layer_norm_lookup(A_ , A_ , '''encoder''' , '''pre_attention_layer_norm''' )
lowercase_ , lowercase_ , lowercase_ , lowercase_ :List[Any] = tax_attention_lookup(A_ , A_ , '''encoder''' , '''attention''' )
lowercase_ :int = layer_norm
lowercase_ :Optional[int] = k.T
lowercase_ :Union[str, Any] = o.T
lowercase_ :Any = q.T
lowercase_ :int = v.T
# Block i, layer 1 (MLP).
lowercase_ :Optional[int] = tax_layer_norm_lookup(A_ , A_ , '''encoder''' , '''pre_mlp_layer_norm''' )
lowercase_ , lowercase_ :Optional[int] = tax_mlp_lookup(A_ , A_ , '''encoder''' , A_ )
lowercase_ :Union[str, Any] = layer_norm
if split_mlp_wi:
lowercase_ :Optional[Any] = wi[0].T
lowercase_ :List[str] = wi[1].T
else:
lowercase_ :List[Any] = wi.T
lowercase_ :Tuple = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase_ :List[Any] = tax_relpos_bias_lookup(
A_ , A_ , '''encoder''' ).T
lowercase_ :str = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
lowercase_ :str = tax_relpos_bias_lookup(
A_ , 0 , '''encoder''' ).T
lowercase_ :str = tax_relpos_bias_lookup(
A_ , 0 , '''decoder''' ).T
if not is_encoder_only:
# Decoder.
for i in range(A_ ):
# Block i, layer 0 (Self Attention).
lowercase_ :Optional[int] = tax_layer_norm_lookup(A_ , A_ , '''decoder''' , '''pre_self_attention_layer_norm''' )
lowercase_ , lowercase_ , lowercase_ , lowercase_ :Union[str, Any] = tax_attention_lookup(A_ , A_ , '''decoder''' , '''self_attention''' )
lowercase_ :Union[str, Any] = layer_norm
lowercase_ :int = k.T
lowercase_ :Any = o.T
lowercase_ :List[Any] = q.T
lowercase_ :List[Any] = v.T
# Block i, layer 1 (Cross Attention).
lowercase_ :Optional[int] = tax_layer_norm_lookup(A_ , A_ , '''decoder''' , '''pre_cross_attention_layer_norm''' )
lowercase_ , lowercase_ , lowercase_ , lowercase_ :Optional[Any] = tax_attention_lookup(A_ , A_ , '''decoder''' , '''encoder_decoder_attention''' )
lowercase_ :Union[str, Any] = layer_norm
lowercase_ :Union[str, Any] = k.T
lowercase_ :str = o.T
lowercase_ :str = q.T
lowercase_ :Dict = v.T
# Block i, layer 2 (MLP).
lowercase_ :int = tax_layer_norm_lookup(A_ , A_ , '''decoder''' , '''pre_mlp_layer_norm''' )
lowercase_ , lowercase_ :Tuple = tax_mlp_lookup(A_ , A_ , '''decoder''' , A_ )
lowercase_ :List[Any] = layer_norm
if split_mlp_wi:
lowercase_ :Dict = wi[0].T
lowercase_ :str = wi[1].T
else:
lowercase_ :Any = wi.T
lowercase_ :Any = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase_ :int = tax_relpos_bias_lookup(A_ , A_ , '''decoder''' ).T
lowercase_ :str = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowercase_ :List[Any] = old['''decoder/logits_dense/kernel'''].T
return new
def make_state_dict(converted_params , is_encoder_only ) -> Union[str, Any]:
    '''simple docstring'''
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['''encoder.embed_tokens.weight'''] = state_dict['''shared.weight''']
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['''decoder.embed_tokens.weight'''] = state_dict['''shared.weight''']
        if "lm_head.weight" not in state_dict: # For old 1.0 models.
            print('''Using shared word embeddings as lm_head.''' )
            state_dict['''lm_head.weight'''] = state_dict['''shared.weight''']
    return state_dict
def load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention ) -> Any:
    '''simple docstring'''
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(
        variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only , scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only = False , scalable_attention = False , ) -> List[str]:
    '''simple docstring'''
    config = MT5Config.from_json_file(config_file )
    print(f"Building PyTorch model from configuration: {config}" )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config )
    else:
        model = UMT5ForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention )
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print('''Done''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
    "--is_encoder_only", action="store_true", help="Whether the model is an encoder-only model.", default=False
)
parser.add_argument(
"--scalable_attention",
action="store_true",
help="Whether the model uses scaled attention (umt5 model)",
default=False,
)
args = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ : Any = logging.get_logger(__name__)
def create_rename_keys(encoder_config , decoder_config ) -> List[Any]:
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm1.weight""", F"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm1.bias""", F"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.weight""", F"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.bias""", F"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm2.weight""", F"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm2.bias""", F"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.weight""", F"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.bias""", F"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc2.weight""", F"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.mlp.fc2.bias""", F"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict , encoder_config ) -> Optional[Any]:
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(F"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
        state_dict[F"""encoder.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[F"""encoder.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[F"""encoder.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
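# The fused-qkv split above in miniature: DeiT stores Q, K and V stacked as one
# (3*H, H) matrix; rows [0:H) are queries, [H:2H) keys and [-H:) values.
_H = 4
_in_proj_demo = torch.arange(3 * _H * _H, dtype=torch.float32).reshape(3 * _H, _H)
assert _in_proj_demo[:_H].shape == (_H, _H)
assert _in_proj_demo[-_H:].shape == (_H, _H)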
def rename_key(dct , old , new ) -> str:
    val = dct.pop(old )
    dct[new] = val
def prepare_img(checkpoint_url ) -> Optional[int]:
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url , stream=True ).raw ).convert("RGB" )
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url , pytorch_dump_folder_path ) -> str:
    encoder_config = ViTConfig(image_size=384 , qkv_bias=True )
    decoder_config = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
_lowercase = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
_lowercase = 1_024
_lowercase = 4_096
_lowercase = 24
_lowercase = 16
_lowercase = 1_024
else:
raise ValueError("Should either find 'base' or 'large' in checkpoint URL" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
_lowercase = False
_lowercase = "relu"
_lowercase = 1_024
_lowercase = True
_lowercase = False
_lowercase = False
# load HuggingFace model
_lowercase = ViTModel(A_ , add_pooling_layer=A_ )
_lowercase = TrOCRForCausalLM(A_ )
_lowercase = VisionEncoderDecoderModel(encoder=A_ , decoder=A_ )
model.eval()
# load state_dict of original model, rename some keys
_lowercase = torch.hub.load_state_dict_from_url(A_ , map_location="cpu" , check_hash=A_ )["model"]
_lowercase = create_rename_keys(A_ , A_ )
for src, dest in rename_keys:
rename_key(A_ , A_ , A_ )
read_in_q_k_v(A_ , A_ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
_lowercase = state_dict.pop(A_ )
if key.startswith("decoder" ) and "output_projection" not in key:
_lowercase = val
else:
_lowercase = val
# load state dict
model.load_state_dict(A_ )
# Check outputs on an image
_lowercase = ViTImageProcessor(size=encoder_config.image_size )
_lowercase = RobertaTokenizer.from_pretrained("roberta-large" )
_lowercase = TrOCRProcessor(A_ , A_ )
_lowercase = processor(images=prepare_img(A_ ) , return_tensors="pt" ).pixel_values
# verify logits
_lowercase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
_lowercase = model(pixel_values=A_ , decoder_input_ids=A_ )
_lowercase = outputs.logits
_lowercase = torch.Size([1, 1, 50_265] )
if "trocr-base-handwritten" in checkpoint_url:
_lowercase = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
_lowercase = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
_lowercase = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
_lowercase = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , A_ , atol=1e-3 ), "First elements of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving processor to {pytorch_dump_folder_path}""" )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
A_ : List[str] = logging.get_logger('transformers.models.speecht5')
A_ : List[str] = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
A_ : int = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
A_ : str = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
A_ : Any = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
A_ : Any = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
A_ : int = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
A_ : Any = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
A_ : Optional[Any] = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
A_ : str = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
A_ : str = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
A_ : Union[str, Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
A_ : int = []
A_ : Optional[Any] = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
A_ : Optional[Any] = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
A_ : Union[str, Any] = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
A_ : Optional[int] = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ) -> int:
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def should_ignore(name , ignore_keys ) -> str:
    for key in ignore_keys:
        if key.endswith(""".*""" ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split(""".*.""" )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
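# Wildcard semantics of should_ignore above, on hypothetical names:
#   "encoder.proj"              ignores any name containing "encoder.proj"
#   "text_encoder_prenet.*"     ignores any name starting with "text_encoder_prenet."
#   "encoder.layers.*.norm_k"   would ignore names containing both the prefix
#                               "encoder.layers" and the suffix "norm_k"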
def UpperCamelCase (lowercase_: Dict , lowercase_: List[str] , lowercase_: Optional[Any] ) -> Optional[int]:
A__ : Any = []
if task == "s2t":
A__ : Any = hf_model.speechta.encoder.prenet.feature_encoder
A__ : Optional[Any] = MAPPING_S2T
A__ : List[Any] = IGNORE_KEYS_S2T
elif task == "t2s":
A__ : Any = None
A__ : Optional[int] = MAPPING_T2S
A__ : Optional[int] = IGNORE_KEYS_T2S
elif task == "s2s":
A__ : Tuple = hf_model.speechta.encoder.prenet.feature_encoder
A__ : List[str] = MAPPING_S2S
A__ : Dict = IGNORE_KEYS_S2S
else:
raise ValueError(f"""Unsupported task: {task}""" )
for name, value in fairseq_dict.items():
if should_ignore(lowercase_ , lowercase_ ):
logger.info(f"""{name} was ignored""" )
continue
A__ : Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , hf_model.config.feat_extract_norm == """group""" , )
A__ : Any = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
A__ , A__ : Tuple = key.split(""".*.""" )
if prefix in name and suffix in name:
A__ : Optional[Any] = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
A__ : Dict = True
if "*" in mapped_key:
A__ : int = name.split(lowercase_ )[0].split(""".""" )[-2]
A__ : List[str] = mapped_key.replace("""*""" , lowercase_ )
if "weight_g" in name:
A__ : Optional[Any] = """weight_g"""
elif "weight_v" in name:
A__ : List[Any] = """weight_v"""
elif "bias" in name:
A__ : int = """bias"""
elif "weight" in name:
A__ : Any = """weight"""
elif "running_mean" in name:
A__ : Optional[Any] = """running_mean"""
elif "running_var" in name:
A__ : Tuple = """running_var"""
elif "num_batches_tracked" in name:
A__ : Dict = """num_batches_tracked"""
else:
A__ : Union[str, Any] = None
set_recursively(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
continue
if not is_used:
unused_weights.append(lowercase_ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: Any , lowercase_: Optional[Any] , lowercase_: str , lowercase_: Any ) -> List[str]:
A__ : int = full_name.split("""conv_layers.""" )[-1]
A__ : Optional[int] = name.split(""".""" )
A__ : Any = int(items[0] )
A__ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
A__ : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
A__ : List[str] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
A__ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
A__ : str = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase_ )
@torch.no_grad()
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: int , lowercase_: str , lowercase_: List[Any]=None , lowercase_: Tuple=None , lowercase_: Dict=None , ) -> Union[str, Any]:
if config_path is not None:
A__ : Tuple = SpeechTaConfig.from_pretrained(lowercase_ )
else:
A__ : Tuple = SpeechTaConfig()
if task == "s2t":
A__ : Optional[int] = config.max_text_positions
A__ : Any = SpeechTaForSpeechToText(lowercase_ )
elif task == "t2s":
A__ : List[Any] = 1876
A__ : str = 600
A__ : List[Any] = config.max_speech_positions
A__ : Tuple = SpeechTaForTextToSpeech(lowercase_ )
elif task == "s2s":
A__ : Dict = 1876
A__ : int = config.max_speech_positions
A__ : Any = SpeechTaForSpeechToSpeech(lowercase_ )
else:
raise ValueError(f"""Unknown task name: {task}""" )
if vocab_path:
A__ : Any = SpeechTaTokenizer(lowercase_ , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
A__ : Optional[Any] = AddedToken("""<mask>""" , lstrip=lowercase_ , rstrip=lowercase_ )
A__ : List[Any] = mask_token
tokenizer.add_special_tokens({"""mask_token""": mask_token} )
tokenizer.add_tokens(["""<ctc_blank>"""] )
A__ : Dict = SpeechTaFeatureExtractor()
A__ : List[Any] = SpeechTaProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
processor.save_pretrained(lowercase_ )
A__ : List[Any] = torch.load(lowercase_ )
recursively_load_weights(fairseq_checkpoint["""model"""] , lowercase_ , lowercase_ )
model.save_pretrained(lowercase_ )
if repo_id:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowercase_ )
model.push_to_hub(lowercase_ )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
import argparse
import hashlib
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def remove_ignore_keys_(state_dict ) -> Optional[int]:
    ignore_keys = ["""layers""", """blocks"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
WHISPER_MAPPING = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def rename_keys(s_dict ) -> Any:
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(f"""{key} -> {new_key}""" )
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
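# Example of the cumulative mapping above on a single original key:
# "blocks" -> "layers" and ".cross_attn.query" -> ".encoder_attn.q_proj".
_demo_sd = {'decoder.blocks.0.cross_attn.query.weight': 0}
rename_keys(_demo_sd)
assert 'decoder.layers.0.encoder_attn.q_proj.weight' in _demo_sd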
def make_linear_from_emb(emb ) -> Optional[int]:
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
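# Weight-tying sketch (illustrative sizes): the projection returned above
# shares its storage with the embedding matrix, i.e. the output head is tied.
_emb_demo = nn.Embedding(10, 4)
_tied_demo = make_linear_from_emb(_emb_demo)
assert _tied_demo.weight.data_ptr() == _emb_demo.weight.data_ptr()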
def _download(url: str , root: str = "." ) -> bytes:
    # NOTE: the default value for `root` is an assumption added here; the
    # call site below passes only the URL.
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split("""/""" )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(f"""{download_target} exists and is not a regular file""" )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , """rb""" ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
    with urllib.request.urlopen(url ) as source, open(download_target , """wb""" ) as output:
        with tqdm(
            total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=True , unit_divisor=1024 ) as loop:
            while True:
                buffer = source.read(8192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , """rb""" ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            """Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.""" )
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path , pytorch_dump_folder_path ) -> Optional[Any]:
if ".pt" not in checkpoint_path:
A__ : Tuple = _download(_MODELS[checkpoint_path] )
else:
A__ : Optional[int] = torch.load(lowercase_ , map_location="""cpu""" )
A__ : str = original_checkpoint["""dims"""]
A__ : List[Any] = original_checkpoint["""model_state_dict"""]
A__ : Optional[Any] = state_dict["""decoder.token_embedding.weight"""]
remove_ignore_keys_(lowercase_ )
rename_keys(lowercase_ )
A__ : List[str] = True
A__ : Optional[Any] = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
A__ : List[Any] = WhisperConfig(
vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=lowercase_ , decoder_ffn_dim=lowercase_ , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_head"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
A__ : Optional[Any] = WhisperForConditionalGeneration(lowercase_ )
A__ , A__ : List[Any] = model.model.load_state_dict(lowercase_ , strict=lowercase_ )
if len(lowercase_ ) > 0 and not set(lowercase_ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"""Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
f""" but all the following weights are missing {missing}""" )
if tie_embeds:
A__ : Any = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
A__ : str = proj_out_weights
model.save_pretrained(lowercase_ )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Patht to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
A_ : Tuple = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
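# Hypothetical invocation (paths and model name are illustrative, not taken from the snippet):
#   python convert_openai_whisper_to_tfms.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny-hf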
| 64
| 1
|
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    """Copy the selected teacher layers into the (smaller) student module list."""
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student: int, n_teacher: int) -> List[int]:
    """Look up which teacher layers to copy; default to the first n_student layers."""
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}")
        return list(range(n_student))
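# For example, with the table above, pick_layers_to_copy(n_student=3, n_teacher=12) returns
# [0, 6, 11]: keep the teacher's first, middle, and last layers.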
def get_layers_to_supervise(n_student: int, n_teacher: int) -> List[int]:
    """Pick which teacher layers supervise each student layer during distillation."""
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
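# For example, get_layers_to_supervise(n_student=3, n_teacher=12) returns [3, 7, 11]: student
# layer i is trained to match the i-th listed teacher layer.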
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ = "student" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__=False , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ , ):
"""simple docstring"""
lowercase__ : Union[str, Any] = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
AutoTokenizer.from_pretrained(lowerCamelCase__ ).save_pretrained(lowerCamelCase__ ) # purely for convenience
lowercase__ : int = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ ).eval()
else:
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), F"""teacher must be a model or string got type {type(lowerCamelCase__ )}"""
lowercase__ : Union[str, Any] = teacher.config.to_diff_dict()
try:
lowercase__ , lowercase__ : Dict = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
lowercase__ : List[str] = teacher_e
if d is None:
lowercase__ : Tuple = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
lowercase__ , lowercase__ : Any = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
lowercase__ , lowercase__ : Union[str, Any] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
lowercase__ : int = teacher_e
if d is None:
lowercase__ : List[str] = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCamelCase__ )
# Copy weights
lowercase__ : int = teacher.config_class(**lowerCamelCase__ )
lowercase__ : Dict = AutoModelForSeqaSeqLM.from_config(lowerCamelCase__ )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
lowercase__ : Dict = student.load_state_dict(teacher.state_dict() , strict=lowerCamelCase__ )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
lowercase__ , lowercase__ : Any = list(range(lowerCamelCase__ ) ), list(range(lowerCamelCase__ ) )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
F""" {save_path}""" )
student.save_pretrained(lowerCamelCase__ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
lowercase__ : List[int] = pick_layers_to_copy(lowerCamelCase__ , lowerCamelCase__ )
if d_layers_to_copy is None:
lowercase__ : List[int] = pick_layers_to_copy(lowerCamelCase__ , lowerCamelCase__ )
try:
if hasattr(
lowerCamelCase__ , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCamelCase__ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCamelCase__ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCamelCase__ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCamelCase__ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCamelCase__ )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCamelCase__ )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
lowercase__ : Tuple = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(lowerCamelCase__ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
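# Hypothetical invocation via fire (teacher name and layer counts are illustrative):
#   python make_student.py facebook/bart-large-xsum --save_path student_12_3 --e 12 --d 3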
| 496
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    r"""Wraps a CLIP image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
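# Minimal usage sketch (the checkpoint name is an assumption; any CLIP checkpoint works):
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")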
| 496
| 1
|
"""simple docstring"""
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
for year in (19_94, 20_00, 20_10, 20_21, 20_23):
UpperCamelCase = """will be""" if year > datetime.now().year else """was"""
print(F"Easter in {year} {tense} {gauss_easter(year)}")
| 701
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    # Placeholder emitted when torch/scipy are unavailable; the class name and the
    # `from_config`/`from_pretrained` method names are inferred from the required backends.
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 120
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample",
            use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        # the reference slice is nine identical values
        expected_slice = np.array([0.00039216] * 9)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy")
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark", generator=generator, guidance_scale=15.0, num_inference_steps=64,
            frame_size=64, output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 683
|
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0")
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32", mode="r", shape=(wiki40b_passages.num_rows, 128),)
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    scores, nn_ids = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in nn_ids[0]]
    return nn_examples
def snake_case__ ( UpperCamelCase ,UpperCamelCase="wiki40b" ,UpperCamelCase="dense" ,UpperCamelCase=10 ) -> Optional[int]:
if source == "none":
_UpperCamelCase, _UpperCamelCase : Dict = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_UpperCamelCase, _UpperCamelCase : str = query_qa_dense_index(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
else:
_UpperCamelCase, _UpperCamelCase : str = query_es_index(
UpperCamelCase ,UpperCamelCase ,index_name='''english_wiki40b_snippets_100w''' ,n_results=UpperCamelCase ,)
_UpperCamelCase : Optional[int] = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_UpperCamelCase : Optional[Any] = '''question: {} context: {}'''.format(UpperCamelCase ,UpperCamelCase )
return question_doc, support_list
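# The returned `question_doc` packs the query and the retrieved passages into the input format the
# seq2seq answer generator expects: "question: <question> context: <P> passage 1 <P> passage 2 ...".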
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    })
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len,
            max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None,
            max_input_length=1024, device="cuda:0",)[0]
    # note: `support_list` is the module-level variable set in the UI flow below
    return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
disclaimer = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 683
| 1
|
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}
def line_stats(example):
    """Calculate mean and max line length of the file content."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters in the file content."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    """Check if the current hash is still in the set of unique hashes; remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False
def is_autogenerated(example, scan_width=5):
    """Check if a file is autogenerated by looking for keywords in its first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if a file is a configuration file or a unit test by:
    1. looking for keywords in the first few lines of the file, and
    2. counting occurrences of the words "config" and "test" relative to the number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    """Check if a python file contains none of the keywords for functions, classes, or loops."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    """Check if the file uses the symbol '=' fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function so the dataset cache is filled only once."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def filter(example, uniques, args):
    """Filter the dataset with heuristics; config/test and keyword-free files are dropped probabilistically."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def compress_file(file_path):
    """Compress a file with gzip and remove the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
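# The driver below chains the steps above: load -> preprocess (map) -> exact dedup on md5 hashes
# -> heuristic filtering -> optional near-dedup (MinHash / Jaccard) -> shard to gzipped JSON files.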
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicated dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 323
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 323
| 1
|
"""simple docstring"""
__all__ = [
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 630
|
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def get_large_model_config(self):
        return MPNetConfig.from_pretrained('microsoft/mpnet-base')
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range,
        )
    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MPNetModel,
"""fill-mask""": MPNetForMaskedLM,
"""question-answering""": MPNetForQuestionAnswering,
"""text-classification""": MPNetForSequenceClassification,
"""token-classification""": MPNetForTokenClassification,
"""zero-shot""": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained('microsoft/mpnet-base')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]])
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 630
| 1
|
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    """Find the area of the grid whose number of contained rectangles is closest to `target`."""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(F"{solution() = }")
| 719
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1)

    os.makedirs(f'{class_data_dir}/images', exist_ok=True)
    if len(list(Path(f'{class_data_dir}/images').iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f'{class_data_dir}/caption.txt', "w") as f1, open(f'{class_data_dir}/urls.txt', "w") as f2, open(
        f'{class_data_dir}/images.txt', "w") as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f'{class_data_dir}/images/{total}.jpg', "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f'{class_data_dir}/images/{total}.jpg' + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
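# Hypothetical invocation (prompt and paths are illustrative):
#   python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./real_reg --num_class_images 200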
| 82
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/pix2struct-textcaps-base': (
        'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self, vocab_size=50244, hidden_size=768, d_kv=64, d_ff=2048, num_layers=12, num_heads=12,
        relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1,
        layer_norm_epsilon=1e-6, initializer_factor=1.0, dense_act_fn="gelu_new", decoder_start_token_id=0,
        use_cache=False, pad_token_id=0, eos_token_id=1, tie_word_embeddings=False, is_decoder=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs,
        )
@classmethod
def __lowerCAmelCase ( cls : Dict , SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE__ : Tuple ):
"""simple docstring"""
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ )
UpperCamelCase , UpperCamelCase = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCamelCase = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class _lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] ="pix2struct_vision_model"
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=7_68 , SCREAMING_SNAKE_CASE__ : int=7_68 , SCREAMING_SNAKE_CASE__ : Optional[int]=20_48 , SCREAMING_SNAKE_CASE__ : str=64 , SCREAMING_SNAKE_CASE__ : Any=12 , SCREAMING_SNAKE_CASE__ : Any=12 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu_new" , SCREAMING_SNAKE_CASE__ : Dict=1e-6 , SCREAMING_SNAKE_CASE__ : Dict=0.0 , SCREAMING_SNAKE_CASE__ : Dict=0.0 , SCREAMING_SNAKE_CASE__ : Any=1e-10 , SCREAMING_SNAKE_CASE__ : str=1.0 , SCREAMING_SNAKE_CASE__ : Optional[int]=40_96 , SCREAMING_SNAKE_CASE__ : Optional[Any]=32 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_28 , **SCREAMING_SNAKE_CASE__ : str , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase = hidden_size
UpperCamelCase = patch_embed_hidden_size
UpperCamelCase = d_ff
UpperCamelCase = dropout_rate
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = initializer_range
UpperCamelCase = initializer_factor
UpperCamelCase = attention_dropout
UpperCamelCase = layer_norm_eps
UpperCamelCase = dense_act_fn
UpperCamelCase = seq_len
UpperCamelCase = relative_attention_num_buckets
UpperCamelCase = relative_attention_max_distance
UpperCamelCase = d_kv
@classmethod
def __lowerCAmelCase ( cls : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ )
UpperCamelCase , UpperCamelCase = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCamelCase = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class _lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] ="pix2struct"
SCREAMING_SNAKE_CASE_ : str =True
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=1.0 , SCREAMING_SNAKE_CASE__ : Tuple=0.02 , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Any=True , **SCREAMING_SNAKE_CASE__ : List[Any] , ):
"""simple docstring"""
super().__init__(tie_word_embeddings=SCREAMING_SNAKE_CASE__ , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if text_config is None:
UpperCamelCase = {}
logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
if vision_config is None:
UpperCamelCase = {}
logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
UpperCamelCase = PixaStructTextConfig(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase = PixaStructVisionConfig(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase = self.text_config.decoder_start_token_id
UpperCamelCase = self.text_config.pad_token_id
UpperCamelCase = self.text_config.eos_token_id
UpperCamelCase = initializer_factor
UpperCamelCase = initializer_range
UpperCamelCase = self.initializer_range
UpperCamelCase = self.initializer_range
UpperCamelCase = is_vqa
@classmethod
def __lowerCAmelCase ( cls : Any , SCREAMING_SNAKE_CASE__ : PixaStructTextConfig , SCREAMING_SNAKE_CASE__ : PixaStructVisionConfig , **SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = copy.deepcopy(self.__dict__ )
UpperCamelCase = self.text_config.to_dict()
UpperCamelCase = self.vision_config.to_dict()
UpperCamelCase = self.__class__.model_type
return output
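# ---------------------------------------------------------------------------
# A minimal usage sketch of the config classes above: compose the two
# sub-configs into the composite config via from_text_vision_configs, then
# round-trip through a plain dict. Class names follow the released
# transformers API; the tiny hyperparameters are illustrative.
from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_config = Pix2StructTextConfig(num_layers=2, num_heads=2, hidden_size=64, d_ff=128)
vision_config = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=2, hidden_size=64)
config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)

serialized = config.to_dict()
assert serialized["model_type"] == "pix2struct"
restored = Pix2StructConfig.from_dict(serialized)
assert restored.text_config.num_layers == 2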
| 282
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
_snake_case , _snake_case = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
_snake_case = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
_snake_case = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
_snake_case = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
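# ---------------------------------------------------------------------------
# A minimal sketch of the function-launching pattern from the comments above.
# The keyword names (fn=, system=, reqs=) are taken from those comments; the
# runhouse API changes between versions, so treat the exact signatures as
# assumptions rather than a definitive recipe.
def _train(num_epochs: int = 1) -> str:
    return f"trained for {num_epochs} epoch(s)"


def _launch_remote_train():
    gpu = rh.cluster(name="rh-cluster", instance_type="V100:1", provider="cheapest")
    remote_train = rh.function(fn=_train, system=gpu, reqs=["pip:./"], name="train_demo")
    return remote_train(num_epochs=3, stream_logs=True)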
| 282
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
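# ---------------------------------------------------------------------------
# _LazyModule above defers the heavy imports until an attribute is first
# touched. A minimal standalone version of the same idea, using PEP 562
# module-level __getattr__ and stdlib modules as stand-ins (the real
# _LazyModule additionally handles submodules and dir()):
import importlib

_lazy_imports = {"json": ["dumps"], "math": ["sqrt"]}


def _lazy_getattr(name):
    # Assign this to a module's __getattr__ to resolve listed names lazily.
    for module_name, symbols in _lazy_imports.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module has no attribute {name!r}")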
| 536
|
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCamelCase__( enum.Enum ):
lowerCAmelCase__ : Optional[Any] = 0
lowerCAmelCase__ : Optional[int] = 1
lowerCAmelCase__ : List[Any] = 2
@add_end_docstrings(__A )
class UpperCamelCase__( __A ):
lowerCAmelCase__ : Optional[Any] = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
super().__init__(*__UpperCAmelCase ,**__UpperCAmelCase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
A__ = None
if self.model.config.prefix is not None:
A__ = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
A__ = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
A__ , A__ , A__ = self._sanitize_parameters(prefix=__UpperCAmelCase ,**self._forward_params )
A__ = {**self._preprocess_params, **preprocess_params}
A__ = {**self._forward_params, **forward_params}
def snake_case__ ( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,**__UpperCAmelCase ,) -> Dict:
A__ = {}
if prefix is not None:
A__ = prefix
if prefix:
A__ = self.tokenizer(
__UpperCAmelCase ,padding=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ,return_tensors=self.framework )
A__ = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
' [None, \'hole\']' )
A__ = handle_long_generation
preprocess_params.update(__UpperCAmelCase )
A__ = generate_kwargs
A__ = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
A__ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
A__ = ReturnType.TENSORS
if return_type is not None:
A__ = return_type
if clean_up_tokenization_spaces is not None:
A__ = clean_up_tokenization_spaces
if stop_sequence is not None:
A__ = self.tokenizer.encode(__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase )
if len(__UpperCAmelCase ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
A__ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def snake_case__ ( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*__UpperCAmelCase ,**__UpperCAmelCase )
def __call__( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict:
return super().__call__(__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase="" ,__UpperCAmelCase=None ,**__UpperCAmelCase ) -> Dict:
A__ = self.tokenizer(
prefix + prompt_text ,padding=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ,return_tensors=self.framework )
A__ = prompt_text
if handle_long_generation == "hole":
A__ = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
A__ = generate_kwargs['max_new_tokens']
else:
A__ = generate_kwargs.get('max_length' ,self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
A__ = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
A__ = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
A__ = inputs['attention_mask'][:, -keep_length:]
return inputs
def snake_case__ ( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
A__ = model_inputs['input_ids']
A__ = model_inputs.get('attention_mask' ,__UpperCAmelCase )
# Allow empty prompts
if input_ids.shape[1] == 0:
A__ = None
A__ = None
A__ = 1
else:
A__ = input_ids.shape[0]
A__ = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
A__ = generate_kwargs.pop('prefix_length' ,0 )
if prefix_length > 0:
A__ = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
A__ = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
A__ = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
A__ = self.model.generate(input_ids=__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,**__UpperCAmelCase )
A__ = generated_sequence.shape[0]
if self.framework == "pt":
A__ = generated_sequence.reshape(__UpperCAmelCase ,out_b // in_b ,*generated_sequence.shape[1:] )
elif self.framework == "tf":
A__ = tf.reshape(__UpperCAmelCase ,(in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase=ReturnType.FULL_TEXT ,__UpperCAmelCase=True ) -> str:
A__ = model_outputs['generated_sequence'][0]
A__ = model_outputs['input_ids']
A__ = model_outputs['prompt_text']
A__ = generated_sequence.numpy().tolist()
A__ = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
A__ = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
A__ = self.tokenizer.decode(
__UpperCAmelCase ,skip_special_tokens=__UpperCAmelCase ,clean_up_tokenization_spaces=__UpperCAmelCase ,)
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
A__ = 0
else:
A__ = len(
self.tokenizer.decode(
input_ids[0] ,skip_special_tokens=__UpperCAmelCase ,clean_up_tokenization_spaces=__UpperCAmelCase ,) )
if return_type == ReturnType.FULL_TEXT:
A__ = prompt_text + text[prompt_length:]
else:
A__ = text[prompt_length:]
A__ = {'generated_text': all_text}
records.append(__UpperCAmelCase )
return records
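# ---------------------------------------------------------------------------
# A minimal usage sketch of the pipeline above: `return_full_text` switches
# the postprocessing between ReturnType.FULL_TEXT (prompt + continuation) and
# ReturnType.NEW_TEXT (continuation only). The model id is illustrative.
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
full = generator("The quick brown fox", max_new_tokens=10)
continuation = generator("The quick brown fox", max_new_tokens=10, return_full_text=False)
print(full[0]["generated_text"])          # prompt followed by the generated text
print(continuation[0]["generated_text"])  # generated text only, prompt stripped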
| 536
| 1
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
SCREAMING_SNAKE_CASE_ = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def lowercase__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : Any=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : Union[str, Any]=None ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = True
while ask_again:
UpperCAmelCase = input(lowerCAmelCase )
try:
if default is not None and len(lowerCAmelCase ) == 0:
return default
return convert_value(lowerCAmelCase ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(lowerCAmelCase )
def lowercase__ ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any=[] , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Tuple=0 ) -> str:
"""simple docstring"""
UpperCAmelCase = BulletMenu(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = menu.run(default_choice=lowerCAmelCase )
return convert_value(lowerCAmelCase ) if convert_value is not None else result
def lowercase__ ( lowerCAmelCase : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = int(lowerCAmelCase )
return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value] )
def lowercase__ ( lowerCAmelCase : List[str] ) -> int:
"""simple docstring"""
UpperCAmelCase = int(lowerCAmelCase )
return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value] )
def lowercase__ ( lowerCAmelCase : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = int(lowerCAmelCase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def lowercase__ ( lowerCAmelCase : List[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase = int(lowerCAmelCase )
return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value] )
def lowercase__ ( lowerCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = int(lowerCAmelCase )
return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value] )
def lowercase__ ( lowerCAmelCase : Optional[int] ) -> Dict:
"""simple docstring"""
return {"yes": True, "no": False}[value.lower()]
class _UpperCAmelCase ( argparse.RawDescriptionHelpFormatter ):
def a_ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
UpperCAmelCase = super()._format_usage(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase = usage.replace('<command> [<args>] ' , '' )
return usage
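# ---------------------------------------------------------------------------
# The ask/convert loop above in standalone form: keep prompting until the
# converter accepts the answer, and fall back to the default on empty input.
# A minimal sketch mirroring the masked helpers, with hypothetical names.
def ask_field(prompt, convert_value=None, default=None, error_message=None):
    while True:
        result = input(prompt)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


# e.g. ask_field("Use fp16? [yes/no]: ", convert_value=yes_no_to_bool, default=False)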
| 373
|
"""simple docstring"""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCREAMING_SNAKE_CASE_ = '''scheduler_config.json'''
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
__SCREAMING_SNAKE_CASE : List[Any] = 1
__SCREAMING_SNAKE_CASE : Dict = 2
__SCREAMING_SNAKE_CASE : List[Any] = 3
__SCREAMING_SNAKE_CASE : Any = 4
__SCREAMING_SNAKE_CASE : Any = 5
__SCREAMING_SNAKE_CASE : Union[str, Any] = 6
__SCREAMING_SNAKE_CASE : str = 7
__SCREAMING_SNAKE_CASE : Any = 8
__SCREAMING_SNAKE_CASE : Tuple = 9
__SCREAMING_SNAKE_CASE : int = 1_0
__SCREAMING_SNAKE_CASE : Union[str, Any] = 1_1
__SCREAMING_SNAKE_CASE : Union[str, Any] = 1_2
__SCREAMING_SNAKE_CASE : Dict = 1_3
__SCREAMING_SNAKE_CASE : Optional[Any] = 1_4
@dataclass
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
__SCREAMING_SNAKE_CASE : torch.FloatTensor
class _UpperCAmelCase :
__SCREAMING_SNAKE_CASE : str = SCHEDULER_CONFIG_NAME
__SCREAMING_SNAKE_CASE : Dict = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
@classmethod
def a_ ( cls , lowercase_ = None , lowercase_ = None , lowercase_=False , **lowercase_ , ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = cls.load_config(
pretrained_model_name_or_path=lowercase_ , subfolder=lowercase_ , return_unused_kwargs=lowercase_ , return_commit_hash=lowercase_ , **lowercase_ , )
return cls.from_config(lowercase_ , return_unused_kwargs=lowercase_ , **lowercase_ )
def a_ ( self , lowercase_ , lowercase_ = False , **lowercase_ ) -> str:
self.save_config(save_directory=lowercase_ , push_to_hub=lowercase_ , **lowercase_ )
@property
def a_ ( self ) -> Union[str, Any]:
return self._get_compatibles()
@classmethod
def a_ ( cls ) -> Any:
UpperCAmelCase = list(set([cls.__name__] + cls._compatibles ) )
UpperCAmelCase = importlib.import_module(__name__.split('.' )[0] )
UpperCAmelCase = [
getattr(lowercase_ , lowercase_ ) for c in compatible_classes_str if hasattr(lowercase_ , lowercase_ )
]
return compatible_classes
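# ---------------------------------------------------------------------------
# A minimal usage sketch of the mixin above: load a concrete scheduler with
# from_pretrained and inspect its compatible classes. The repo id is
# illustrative; any diffusers checkpoint with a scheduler_config.json in a
# "scheduler" subfolder works the same way.
from diffusers import DDPMScheduler

scheduler = DDPMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler")
print(type(scheduler).__name__)
print([cls.__name__ for cls in scheduler.compatibles])  # schedulers sharing this config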
| 373
| 1
|
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _lowercase ( UpperCamelCase__ : Union[str, Any] ):
__A : Dict = tf.convert_to_tensor(UpperCamelCase__ )
__A : Optional[int] = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ), x.dtype ) ))
return x * cdf
def _lowercase ( UpperCamelCase__ : Any ):
__A : int = tf.convert_to_tensor(UpperCamelCase__ )
__A : List[Any] = tf.cast(math.pi, x.dtype )
__A : Dict = tf.cast(0.044715, x.dtype )
__A : Dict = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(UpperCamelCase__, 3 )) ))
return x * cdf
def _lowercase ( UpperCamelCase__ : Optional[int] ):
__A : List[Any] = tf.convert_to_tensor(UpperCamelCase__ )
return x * tf.tanh(tf.math.softplus(UpperCamelCase__ ) )
def _lowercase ( UpperCamelCase__ : Union[str, Any] ):
__A : Any = tf.convert_to_tensor(UpperCamelCase__ )
__A : Union[str, Any] = tf.cast(0.044715, x.dtype )
__A : List[Any] = tf.cast(0.7978845608, x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def _lowercase ( UpperCamelCase__ : Optional[int] ):
__A : Any = tf.convert_to_tensor(UpperCamelCase__ )
__A : int = tf.cast(1.702, x.dtype )
return x * tf.math.sigmoid(coeff * x )
def _lowercase ( UpperCamelCase__ : Union[str, Any] ):
return tf.clip_by_value(_gelu(UpperCamelCase__ ), -10, 10 )
def _lowercase ( UpperCamelCase__ : Optional[int], UpperCamelCase__ : Optional[int]=-1 ):
    __A , __A = tf.split(UpperCamelCase__, 2, axis=UpperCamelCase__ )
return a * tf.math.sigmoid(UpperCamelCase__ )
if version.parse(tf.version.VERSION) >= version.parse('2.4'):
def _lowercase ( UpperCamelCase__ : Tuple ):
return tf.keras.activations.gelu(UpperCamelCase__, approximate=UpperCamelCase__ )
UpperCAmelCase_ : Any = tf.keras.activations.gelu
UpperCAmelCase_ : List[str] = approximate_gelu_wrap
else:
UpperCAmelCase_ : Dict = _gelu
UpperCAmelCase_ : List[str] = _gelu_new
UpperCAmelCase_ : str = {
'gelu': gelu,
'gelu_10': gelu_aa,
'gelu_fast': gelu_fast,
'gelu_new': gelu_new,
'glu': glu,
'mish': mish,
'quick_gelu': quick_gelu,
'relu': tf.keras.activations.relu,
'sigmoid': tf.keras.activations.sigmoid,
'silu': tf.keras.activations.swish,
'swish': tf.keras.activations.swish,
'tanh': tf.keras.activations.tanh,
}
def _lowercase ( UpperCamelCase__ : Optional[int] ):
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(f"""function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}""" )
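# ---------------------------------------------------------------------------
# A minimal numeric sketch comparing the exact erf-based GELU with the tanh
# approximation defined above; the two agree to a few decimal places on
# moderate inputs, which is why the approximation is a common drop-in.
import math

import tensorflow as tf

x = tf.constant([-2.0, -0.5, 0.0, 0.5, 2.0])
exact = x * 0.5 * (1.0 + tf.math.erf(x / tf.sqrt(2.0)))
approx = x * 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / math.pi) * (x + 0.044715 * tf.pow(x, 3))))
print(float(tf.reduce_max(tf.abs(exact - approx))))  # on the order of 1e-3 or smaller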
| 540
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class _lowerCamelCase :
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=13 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=False , __lowercase=True , __lowercase=99 , __lowercase=32 , __lowercase=5 , __lowercase=4 , __lowercase=37 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=512 , __lowercase=16 , __lowercase=2 , __lowercase=0.0_2 , __lowercase=3 , __lowercase=4 , __lowercase=None , ):
"""simple docstring"""
__A : Optional[int] = parent
__A : Tuple = batch_size
__A : Optional[int] = seq_length
__A : Tuple = is_training
__A : Optional[Any] = use_input_mask
__A : Optional[Any] = use_token_type_ids
__A : Optional[int] = use_labels
__A : str = vocab_size
__A : Dict = hidden_size
__A : Tuple = num_hidden_layers
__A : Optional[int] = num_attention_heads
__A : str = intermediate_size
__A : List[Any] = hidden_act
__A : List[str] = hidden_dropout_prob
__A : int = attention_probs_dropout_prob
__A : int = max_position_embeddings
__A : int = type_vocab_size
__A : int = type_sequence_label_size
__A : str = initializer_range
__A : str = num_labels
__A : str = num_choices
__A : Any = scope
def snake_case__ ( self ):
"""simple docstring"""
__A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A : Union[str, Any] = None
if self.use_input_mask:
__A : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__A : str = None
if self.use_token_type_ids:
__A : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__A : Union[str, Any] = None
__A : Optional[int] = None
__A : List[str] = None
if self.use_labels:
__A : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
__A : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , use_stable_embedding=__lowercase , )
def snake_case__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
"""simple docstring"""
__A : List[str] = OpenLlamaModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__A : Any = model(__lowercase , attention_mask=__lowercase )
__A : Dict = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ):
"""simple docstring"""
__A : List[str] = True
__A : int = OpenLlamaModel(__lowercase )
model.to(__lowercase )
model.eval()
__A : List[Any] = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , )
__A : Optional[Any] = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , )
__A : Optional[Any] = model(__lowercase , attention_mask=__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ):
"""simple docstring"""
__A : Dict = OpenLlamaForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
__A : Optional[Any] = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ):
"""simple docstring"""
__A : List[Any] = True
__A : Optional[int] = True
__A : Dict = OpenLlamaForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
# first forward pass
__A : List[str] = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , use_cache=__lowercase , )
__A : Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__A : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
__A : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__A : int = torch.cat([input_ids, next_tokens] , dim=-1 )
__A : Dict = torch.cat([input_mask, next_mask] , dim=-1 )
__A : Tuple = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , output_hidden_states=__lowercase , )['hidden_states'][0]
__A : Any = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , past_key_values=__lowercase , output_hidden_states=__lowercase , )['hidden_states'][0]
# select random slice
__A : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__A : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
__A : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1E-3 ) )
def snake_case__ ( self ):
"""simple docstring"""
__A : int = self.prepare_config_and_inputs()
        __A, __A, __A, __A, __A, __A, __A = config_and_inputs  # config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
__A : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
__lowercase : List[str] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__lowercase : List[Any] = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__lowercase : Optional[int] = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowercase : Union[str, Any] = False
__lowercase : Union[str, Any] = False
def snake_case__ ( self ):
"""simple docstring"""
__A : Optional[int] = OpenLlamaModelTester(self )
__A : List[str] = ConfigTester(self , config_class=__lowercase , hidden_size=37 )
def snake_case__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case__ ( self ):
"""simple docstring"""
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def snake_case__ ( self ):
"""simple docstring"""
__A : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__A : List[str] = type
self.model_tester.create_and_check_model(*__lowercase )
def snake_case__ ( self ):
"""simple docstring"""
        __A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A : Union[str, Any] = 3
__A : int = input_dict['input_ids']
__A : int = input_ids.ne(1 ).to(__lowercase )
__A : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__A : Optional[Any] = OpenLlamaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
__A : Optional[int] = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__ ( self ):
"""simple docstring"""
        __A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = 3
__A : List[str] = 'single_label_classification'
__A : Dict = input_dict['input_ids']
__A : Dict = input_ids.ne(1 ).to(__lowercase )
__A : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__A : Union[str, Any] = OpenLlamaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
__A : Dict = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__ ( self ):
"""simple docstring"""
        __A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A : str = 3
__A : int = 'multi_label_classification'
__A : Union[str, Any] = input_dict['input_ids']
__A : str = input_ids.ne(1 ).to(__lowercase )
__A : Union[str, Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__A : int = OpenLlamaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
__A : Any = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def snake_case__ ( self ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def snake_case__ ( self , __lowercase ):
"""simple docstring"""
        __A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[Any] = ids_tensor([1, 10] , config.vocab_size )
__A : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__A : Union[str, Any] = OpenLlamaModel(__lowercase )
original_model.to(__lowercase )
original_model.eval()
__A : Optional[int] = original_model(__lowercase ).last_hidden_state
__A : int = original_model(__lowercase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__A : List[Any] = {'type': scaling_type, 'factor': 1_0.0}
__A : str = OpenLlamaModel(__lowercase )
scaled_model.to(__lowercase )
scaled_model.eval()
__A : Dict = scaled_model(__lowercase ).last_hidden_state
__A : List[str] = scaled_model(__lowercase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__lowercase , __lowercase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__lowercase , __lowercase , atol=1E-5 ) )
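# ---------------------------------------------------------------------------
# A minimal sketch of the rope_scaling dict exercised by the parameterized
# test above, set directly on a config. OpenLlama mirrors the Llama API;
# availability of the class depends on the installed transformers version,
# and the tiny sizes are illustrative.
from transformers import OpenLlamaConfig

config = OpenLlamaConfig(
    vocab_size=99,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    max_position_embeddings=64,
    rope_scaling={"type": "dynamic", "factor": 10.0},
)
print(config.rope_scaling)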
| 540
| 1
|
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def __UpperCamelCase ( lowercase__ : Optional[Any], lowercase__ : int, lowercase__ : List[Any] ):
'''simple docstring'''
__lowercase =OmegaConf.load(__lowerCamelCase )
__lowercase =torch.load(__lowerCamelCase, map_location='cpu' )['''model''']
__lowercase =list(state_dict.keys() )
# extract state_dict for VQVAE
__lowercase ={}
__lowercase ='''first_stage_model.'''
for key in keys:
if key.startswith(__lowerCamelCase ):
__lowercase =state_dict[key]
# extract state_dict for UNetLDM
__lowercase ={}
__lowercase ='''model.diffusion_model.'''
for key in keys:
if key.startswith(__lowerCamelCase ):
__lowercase =state_dict[key]
__lowercase =config.model.params.first_stage_config.params
__lowercase =config.model.params.unet_config.params
__lowercase =VQModel(**__lowerCamelCase ).eval()
vqvae.load_state_dict(__lowerCamelCase )
__lowercase =UNetLDMModel(**__lowerCamelCase ).eval()
unet.load_state_dict(__lowerCamelCase )
__lowercase =DDIMScheduler(
timesteps=config.model.params.timesteps, beta_schedule='scaled_linear', beta_start=config.model.params.linear_start, beta_end=config.model.params.linear_end, clip_sample=__lowerCamelCase, )
__lowercase =LDMPipeline(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
pipeline.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
UpperCAmelCase = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
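# ---------------------------------------------------------------------------
# A minimal usage sketch of the converted pipeline: LDMPipeline here is the
# unconditional latent-diffusion pipeline saved above. The path stands in
# for whatever --output_path was used; the call signature is assumed from
# the released diffusers API.
import torch

from diffusers import LDMPipeline

pipe = LDMPipeline.from_pretrained("path/to/output")  # the --output_path used above
generator = torch.manual_seed(0)
image = pipe(num_inference_steps=50, generator=generator).images[0]
image.save("ldm_sample.png")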
| 119
|
"""simple docstring"""
lowerCAmelCase_ = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.602176634E-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.3_5_5_8_1_8,
}
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> float:
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
lowercase__ : Optional[Any] = (
f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
f"""Valid values are: {", ".join(__lowerCamelCase )}"""
)
raise ValueError(__lowerCamelCase )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
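# ---------------------------------------------------------------------------
# The conversion rule above in standalone form: scale into joules with the
# source factor, then back out with the target factor. A self-contained
# sketch with a trimmed factor table and two worked checks.
FACTORS_TO_JOULES = {"joule": 1.0, "kilojoule": 1_000.0, "kilowatthour": 3_600_000.0}


def convert_energy(from_type: str, to_type: str, value: float) -> float:
    return value * FACTORS_TO_JOULES[from_type] / FACTORS_TO_JOULES[to_type]


assert convert_energy("kilojoule", "joule", 1.0) == 1_000.0
assert convert_energy("joule", "kilowatthour", 3_600_000.0) == 1.0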
| 560
| 0
|
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def lowercase_ ( _lowercase : Any , _lowercase : List[Any]=False ):
'''simple docstring'''
UpperCAmelCase : str = OmegaConf.load(_lowercase )
if display:
print(yaml.dump(OmegaConf.to_container(_lowercase ) ) )
return config
def lowercase_ ( _lowercase : Optional[int] , _lowercase : int=None , _lowercase : Tuple=None ):
'''simple docstring'''
if conf_path is None:
UpperCAmelCase : Optional[Any] = "./model_checkpoints/vqgan_only.yaml"
UpperCAmelCase : Union[str, Any] = load_config(_lowercase , display=_lowercase )
UpperCAmelCase : int = VQModel(**config.model.params )
if ckpt_path is None:
UpperCAmelCase : Tuple = "./model_checkpoints/vqgan_only.pt"
UpperCAmelCase : int = torch.load(_lowercase , map_location=_lowercase )
if ".ckpt" in ckpt_path:
UpperCAmelCase : List[str] = sd["state_dict"]
model.load_state_dict(_lowercase , strict=_lowercase )
model.to(_lowercase )
del sd
return model
def lowercase_ ( _lowercase : List[Any] , _lowercase : Optional[int] ):
'''simple docstring'''
    UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = model.encode(_lowercase )
print(F"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" )
UpperCAmelCase : List[str] = model.decode(_lowercase )
return xrec
def lowercase_ ( _lowercase : Dict , _lowercase : List[str]=False ):
'''simple docstring'''
    UpperCAmelCase , UpperCAmelCase = string.rsplit("." , 1 )
if reload:
UpperCAmelCase : Optional[int] = importlib.import_module(_lowercase )
importlib.reload(_lowercase )
return getattr(importlib.import_module(_lowercase , package=_lowercase ) , cls )
def lowercase_ ( _lowercase : int ):
'''simple docstring'''
if "target" not in config:
raise KeyError("Expected key `target` to instantiate." )
return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def lowercase_ ( _lowercase : Optional[Any] , _lowercase : Dict , _lowercase : Optional[int]=True , _lowercase : str=True ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = instantiate_from_config(_lowercase )
if sd is not None:
model.load_state_dict(_lowercase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def lowercase_ ( _lowercase : str , _lowercase : Optional[int] , _lowercase : Optional[Any] , _lowercase : Dict ):
'''simple docstring'''
if ckpt:
UpperCAmelCase : Dict = torch.load(_lowercase , map_location="cpu" )
UpperCAmelCase : List[str] = pl_sd["global_step"]
print(F"""loaded model from global step {global_step}.""" )
else:
UpperCAmelCase : Union[str, Any] = {"state_dict": None}
UpperCAmelCase : Any = None
UpperCAmelCase : List[str] = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=_lowercase , eval_mode=_lowercase )["model"]
return model, global_step
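# ---------------------------------------------------------------------------
# The get_obj_from_str / instantiate_from_config pattern above in standalone
# form: a config names a dotted "target" class plus keyword "params", and the
# class is imported and constructed dynamically. The stdlib target below is
# illustrative.
import importlib


def get_obj_from_str(string: str):
    module, cls = string.rsplit(".", 1)
    return getattr(importlib.import_module(module), cls)


def instantiate_from_config(config: dict):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


delta = instantiate_from_config({"target": "datetime.timedelta", "params": {"days": 2}})
print(delta)  # 2 days, 0:00:00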
| 292
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
snake_case_ : Union[str, Any] = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def lowercase_ ( _lowercase : List[str] ):
'''simple docstring'''
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def lowercase_ ( _lowercase : List[str] ):
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowercase )
def lowercase_ ( _lowercase : Tuple ):
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
UpperCAmelCase : Optional[int] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(_lowercase , id=_lowercase )
def lowercase_ ( _lowercase : str , _lowercase : Dict ):
'''simple docstring'''
if exitstatus == 5:
UpperCAmelCase : List[str] = 0
# Doctest custom flag to ignore output.
snake_case_ : Union[str, Any] = doctest.register_optionflag("""IGNORE_RESULT""")
snake_case_ : Optional[int] = doctest.OutputChecker
class snake_case__ ( lowerCAmelCase_ ):
def __lowerCAmelCase ( self : List[Any] , lowercase : Tuple , lowercase : Optional[Any] , lowercase : int ):
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , lowercase , lowercase , lowercase )
snake_case_ : List[str] = CustomOutputChecker
snake_case_ : Optional[Any] = HfDoctestModule
snake_case_ : List[str] = HfDocTestParser
| 292
| 1
|
import math
import os
import sys
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = ''
try:
with open(_UpperCAmelCase , 'rb') as binary_file:
SCREAMING_SNAKE_CASE = binary_file.read()
for dat in data:
SCREAMING_SNAKE_CASE = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible')
sys.exit()
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
lexicon.pop(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = last_match_id
if math.loga(_UpperCAmelCase).is_integer():
for curr_key in lexicon:
SCREAMING_SNAKE_CASE = '0' + lexicon[curr_key]
SCREAMING_SNAKE_CASE = bin(_UpperCAmelCase)[2:]
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = {'0': '0', '1': '1'}
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = '', ''
SCREAMING_SNAKE_CASE = len(_UpperCAmelCase)
for i in range(len(_UpperCAmelCase)):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
SCREAMING_SNAKE_CASE = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
index += 1
SCREAMING_SNAKE_CASE = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
SCREAMING_SNAKE_CASE = lexicon[curr_string]
result += last_match_id
return result
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = os.path.getsize(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = bin(_UpperCAmelCase)[2:]
SCREAMING_SNAKE_CASE = len(_UpperCAmelCase)
return "0" * (length_length - 1) + file_length_binary + compressed
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = 8
try:
with open(_UpperCAmelCase , 'wb') as opened_file:
SCREAMING_SNAKE_CASE = [
to_write[i : i + byte_length]
for i in range(0 , len(_UpperCAmelCase) , _UpperCAmelCase)
]
if len(result_byte_array[-1]) % byte_length == 0:
result_byte_array.append('10000000')
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1]) - 1
)
for elem in result_byte_array:
opened_file.write(int(_UpperCAmelCase , 2).to_bytes(1 , byteorder='big'))
except OSError:
print('File not accessible')
sys.exit()
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = read_file_binary(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = compress_data(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = add_file_length(_UpperCAmelCase , _UpperCAmelCase)
write_file_binary(_UpperCAmelCase , _UpperCAmelCase)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
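# ---------------------------------------------------------------------------
# The bit-level file reader above in standalone form: every byte becomes its
# 8-bit big-endian string, so the compressor can walk the file as one bit
# stream. A minimal sketch with two worked checks.
def bytes_to_bits(data: bytes) -> str:
    return "".join(f"{byte:08b}" for byte in data)


assert bytes_to_bits(b"\x00\xff") == "00000000" + "11111111"
assert bytes_to_bits(b"A") == "01000001"  # ord("A") == 65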
| 73
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
a_ : Any = 'true'
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=82 , _UpperCAmelCase=16):
set_seed(42)
SCREAMING_SNAKE_CASE = RegressionModel()
SCREAMING_SNAKE_CASE = deepcopy(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = RegressionDataset(length=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = DataLoader(_UpperCAmelCase , batch_size=_UpperCAmelCase)
model.to(accelerator.device)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase)
return model, ddp_model, dataloader
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=False):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased')
SCREAMING_SNAKE_CASE = load_dataset('glue' , 'mrpc' , split='validation')
def tokenize_function(_UpperCAmelCase):
SCREAMING_SNAKE_CASE = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase)
return outputs
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE = dataset.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column('label' , 'labels')
def collate_fn(_UpperCAmelCase):
if use_longest:
return tokenizer.pad(_UpperCAmelCase , padding='longest' , return_tensors='pt')
return tokenizer.pad(_UpperCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt')
return DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=16)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = Accelerator(dispatch_batches=_UpperCAmelCase , split_batches=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = get_dataloader(_UpperCAmelCase , not dispatch_batches)
SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase)
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = []
for batch in dataloader:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = batch.values()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((logit, target))
logits_and_targets.append((logit, target))
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = [], []
for logit, targ in logits_and_targets:
logits.append(_UpperCAmelCase)
targs.append(_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torch.cat(_UpperCAmelCase), torch.cat(_UpperCAmelCase)
return logits, targs
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=82 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=16):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_basic_setup(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = generate_predictions(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
assert (
len(_UpperCAmelCase) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_UpperCAmelCase)}'''
def lowerCamelCase__ (_UpperCAmelCase = False , _UpperCAmelCase = False):
SCREAMING_SNAKE_CASE = evaluate.load('glue' , 'mrpc')
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_mrpc_setup(_UpperCAmelCase , _UpperCAmelCase)
# First do baseline
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = setup['no']
model.to(_UpperCAmelCase)
model.eval()
for batch in dataloader:
batch.to(_UpperCAmelCase)
with torch.inference_mode():
SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1)
metric.add_batch(predictions=_UpperCAmelCase , references=batch['labels'])
SCREAMING_SNAKE_CASE = metric.compute()
# Then do distributed
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1)
SCREAMING_SNAKE_CASE = batch['labels']
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((preds, references))
metric.add_batch(predictions=_UpperCAmelCase , references=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key]), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**')
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''')
test_mrpc(_UpperCAmelCase , _UpperCAmelCase)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**')
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
SCREAMING_SNAKE_CASE = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase)
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''')
test_torch_metrics(_UpperCAmelCase , 99)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**')
SCREAMING_SNAKE_CASE = Accelerator()
test_torch_metrics(_UpperCAmelCase , 512)
accelerator.state._reset_state()
def lowerCamelCase__ (_UpperCAmelCase):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 73
| 1
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
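

# Running this suite launches real (paid) SageMaker training jobs, so it is
# gated behind an environment flag; an illustrative invocation (the test path
# is a placeholder for your checkout layout):
#
#   TEST_SAGEMAKER=True python -m pytest tests/sagemaker -k "model_parallel"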
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_snake_case = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>",
        cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None,
        tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token,
            sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(
        self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn", **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix and suffix=[eos, src_lang_code].
        - In default mode: prefix=[src_lang_code] and suffix=[eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: no prefix and suffix=[eos, tgt_lang_code].
        - In default mode: prefix=[tgt_lang_code] and suffix=[eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
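

# Illustrative usage sketch (not part of the original module); the checkpoint
# name matches the pretrained maps above, everything else is a plain example:
#
#   from transformers import AutoTokenizer
#
#   tok = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#   batch = tok("Hello world", return_tensors="pt")
#   # For generation, the target language is forced via its language-code id:
#   forced_bos = tok.convert_tokens_to_ids("fra_Latn")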
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.

    Returns: ConvertCommand
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers-cli.

        Args:
            parser: Root parser to register command-specific arguments
        """
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(
        self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str,
        finetuning_task_name: str, *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(snake_case__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
_lowerCAmelCase : List[Any] = self._tf_checkpoint
_lowerCAmelCase : Optional[int] = ''
else:
_lowerCAmelCase : int = self._tf_checkpoint
_lowerCAmelCase : Dict = ''
convert_transfo_xl_checkpoint_to_pytorch(
snake_case__ , self._config , self._pytorch_dump_output , snake_case__ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case__ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]' )
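

# Illustrative CLI usage (a sketch; the flags mirror the parser registered above):
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin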
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
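

# Illustrative usage sketch (assumes a compatible unconditional LDM checkpoint,
# e.g. "CompVis/ldm-celebahq-256", is available on the Hub):
#
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]
#   image.save("ldm_sample.png")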
def factorial(num: int) -> int:
    """Return num! computed iteratively."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the digits of number and add them up."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
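

# Sanity check: solution(100) returns 648, the published answer to Project
# Euler problem 20 (the digit sum of 100!).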
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # note: the original conditions were `if "fc2" and ...`, which is always
        # truthy on the first operand; the membership test below is the clear intent
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
UpperCamelCase = parser.parse_args()
UpperCamelCase , UpperCamelCase = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
UpperCamelCase = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
UpperCamelCase = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
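

# Illustrative invocation (a sketch; the script name and paths are placeholders):
#
#   python convert_nllb_moe_sharded_original_checkpoint_to_pytorch.py \
#       --nllb_moe_checkpoint_path /path/to/checkpoint_2_300000 \
#       --pytorch_dump_folder_path /path/to/hf-converted-moe \
#       --dtype float32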
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
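

# The helpers exercised above are framework-agnostic: transpose, reshape,
# squeeze, and expand_dims accept NumPy arrays, PyTorch tensors, TensorFlow
# tensors, and JAX arrays alike, e.g. transpose(np.ones((3, 4))) and
# transpose(torch.ones(3, 4)) agree elementwise once converted back to NumPy.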
import math
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = 0 ) ->str:
UpperCAmelCase__ = end or len(A_ )
for i in range(A_ , A_ ):
UpperCAmelCase__ = i
UpperCAmelCase__ = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
UpperCAmelCase__ = array[temp_index - 1]
temp_index -= 1
UpperCAmelCase__ = temp_index_value
return array
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str: # Max Heap
UpperCAmelCase__ = index
UpperCAmelCase__ = 2 * index + 1 # Left Node
UpperCAmelCase__ = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
UpperCAmelCase__ = left_index
if right_index < heap_size and array[largest] < array[right_index]:
UpperCAmelCase__ = right_index
if largest != index:
UpperCAmelCase__ , UpperCAmelCase__ = array[largest], array[index]
heapify(A_ , A_ , A_ )
def snake_case__ ( _SCREAMING_SNAKE_CASE ) ->Dict:
UpperCAmelCase__ = len(A_ )
for i in range(n // 2 , -1 , -1 ):
heapify(A_ , A_ , A_ )
for i in range(n - 1 , 0 , -1 ):
UpperCAmelCase__ , UpperCAmelCase__ = array[0], array[i]
heapify(A_ , 0 , A_ )
return array
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]:
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
UpperCAmelCase__ = low
UpperCAmelCase__ = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
UpperCAmelCase__ , UpperCAmelCase__ = array[j], array[i]
i += 1
def snake_case__ ( _SCREAMING_SNAKE_CASE ) ->Tuple:
if len(A_ ) == 0:
return array
UpperCAmelCase__ = 2 * math.ceil(math.loga(len(A_ ) ) )
UpperCAmelCase__ = 1_6
return intro_sort(A_ , 0 , len(A_ ) , A_ , A_ )
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]:
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(A_ )
max_depth -= 1
UpperCAmelCase__ = median_of_a(A_ , A_ , start + ((end - start) // 2) + 1 , end - 1 )
UpperCAmelCase__ = partition(A_ , A_ , A_ , A_ )
intro_sort(A_ , A_ , A_ , A_ , A_ )
UpperCAmelCase__ = p
return insertion_sort(A_ , A_ , A_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
a : str = input('''Enter numbers separated by a comma : ''').strip()
a : List[Any] = [float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
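

# Introsort in a nutshell: quicksort drives the recursion until the depth
# budget 2*ceil(log2(n)) is spent, heapsort takes over past that point, and
# slices shorter than 16 elements are finished with insertion sort; e.g.
# sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56]) -> [1, 2, 4, 6, 7, 8, 8, 14, 22, 56].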
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def snake_case__ ( _SCREAMING_SNAKE_CASE ) ->List[str]:
UpperCAmelCase__ = [len(row["""sentence"""] ) for row in batch]
UpperCAmelCase__ , UpperCAmelCase__ = len(_SCREAMING_SNAKE_CASE ), max(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=torch.long )
UpperCAmelCase__ = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase__ = input_row["""sentence"""]
UpperCAmelCase__ = 1
UpperCAmelCase__ = torch.stack([row["""image"""] for row in batch] )
UpperCAmelCase__ = torch.stack([row["""label"""] for row in batch] )
UpperCAmelCase__ = torch.stack([row["""image_start_token"""] for row in batch] )
UpperCAmelCase__ = torch.stack([row["""image_end_token"""] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def snake_case__ ( ) ->int:
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def snake_case__ ( ) ->str:
return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
] )
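

# Illustrative wiring sketch (the tokenizer checkpoint and jsonl path are
# placeholders; everything else uses the helpers defined above):
#
#   from torch.utils.data import DataLoader
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(),
#                          get_mmimdb_labels(), max_seq_length=509)
#   loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)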
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE__ : List[str] = """Hello world! cécé herlolip"""
SCREAMING_SNAKE_CASE__ : Any = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = BertAbsConfig(
temp_dir=""".""" , finetune_bert=__lowerCamelCase , large=__lowerCamelCase , share_emb=__lowerCamelCase , use_bert_emb=__lowerCamelCase , encoder="""bert""" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
UpperCAmelCase__ : Any = torch.load(__lowerCamelCase , lambda __lowerCamelCase , __lowerCamelCase : storage )
UpperCAmelCase__ : int = AbsSummarizer(__lowerCamelCase , torch.device("""cpu""" ) , __lowerCamelCase )
original.eval()
UpperCAmelCase__ : Tuple = BertAbsSummarizer(__lowerCamelCase , torch.device("""cpu""" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("""convert the model""" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("""Make sure that the models' outputs are identical""" )
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
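

# Illustrative invocation (a sketch; the script name and paths are placeholders):
#
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path /path/to/bertabs_cnndm.pt \
#       --pytorch_dump_folder_path ./bertabs-finetuned-cnndm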
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
UpperCamelCase_ = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)

    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}

            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()

    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
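

# Illustrative call sequence (a sketch; assumes `accelerator` was created with an
# FSDP plugin and `model`/`optimizer` have been prepared by it):
#
#   fsdp_plugin = accelerator.state.fsdp_plugin
#   save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")
#   ...
#   load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#   load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")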
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
        FlaxBlenderbotSmallForConditionalGeneration,
        FlaxBlenderbotSmallModel,
        shift_tokens_right,
    )
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
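# Quick illustration (added, not from the original tests) of the masking rule
# used above: positions holding `pad_token_id` become 0, all others become 1.
#
#     >>> ids = np.array([[5, 6, 1, 1]])  # assume pad_token_id == 1
#     >>> np.where(ids != 1, 1, 0)
#     array([[1, 1, 0, 0]])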
class FlaxBlenderbotSmallModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxBlenderbotSmallHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
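# For reference, a minimal numpy sketch (an assumption about the contract, not
# the library implementation) of the `shift_tokens_right` behaviour the test
# above exercises: tokens move one slot to the right and the decoder start
# token (here 2) fills position 0.
#
#     def shift_tokens_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
#         shifted = np.full_like(input_ids, pad_token_id)
#         shifted[:, 1:] = input_ids[:, :-1]
#         shifted[:, 0] = decoder_start_token_id
#         return shifted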
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
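# End-to-end sketch of the model under test (added; the checkpoint name is the
# real one used above, the prompt is illustrative):
#
#     from transformers import BlenderbotSmallTokenizer
#
#     tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#     model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
#     inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="np")
#     generated_ids = model.generate(**inputs).sequences
#     tokenizer.batch_decode(generated_ids, skip_special_tokens=True)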
| 705
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'Dataset tokenized in {time.time()-t_start:.2f}s')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'Data pushed to the hub in {time.time()-t_start:.2f}s')
| 498
| 0
|
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
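# The try/except blocks above all follow a single pattern, shown here in
# isolation (a generic sketch, not an excerpt): probe the optional dependency,
# and on failure re-export dummy objects that raise a helpful error when used.
#
#     try:
#         if not is_torch_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         from .utils.dummy_pt_objects import *  # noqa F403
#     else:
#         from .models import ModelMixin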
| 456
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 57
| 0
|
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
            function(*args)
def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
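# Usage sketch for the launchers above (added; the training function and its
# arguments are illustrative):
#
#     def training_function(learning_rate):
#         ...  # build the Accelerator, model and loop inside this function
#
#     notebook_launcher(training_function, args=(3e-4,), num_processes=2)
#
# `debug_launcher` runs the same kind of function on CPU processes and is
# mainly useful for smoke-testing distributed code paths in unit tests.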
| 717
|
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5
# Realm tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self):
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
    def tearDown(self):
shutil.rmtree(self.tmpdirname )
    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
    def get_dummy_block_records(self):
        block_records = np.array(
[
B'''This is the first record''',
B'''This is the second record''',
B'''This is the third record''',
B'''This is the fourth record''',
B'''This is the fifth record''',
B'''This is a longer longer longer record''',
            ], dtype=object, )
return block_records
    def get_dummy_retriever(self):
        retriever = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np")
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np")
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
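# Summary of the retriever contract exercised above (added note): given block
# ids and a tokenized question, the retriever returns a tuple
# (has_answers, start_pos, end_pos, concat_inputs), where `concat_inputs`
# packs "[CLS] question [SEP] block [SEP]" for each retrieved block, as the
# token-level assertions in test_retrieve show.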
| 357
| 0
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
"""simple docstring"""
return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
    def test_save_load_optional_components(self):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
    def test_save_load_local(self):
"""simple docstring"""
self._test_save_load_local()
    def test_inference_batch_single_identical(self):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 74
|
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    # Compute the affine transform mapping pt1 onto pt2 and apply it to the image.
    rot_mat = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, rot_mat, (rows, cols))
if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image (the distinct point-set names were
    # lost in the source; the pairings below are a plausible reconstruction)
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
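# Side note (added): cv2.getAffineTransform solves for the 2x3 matrix M that
# maps three source points onto three destination points, i.e. dst = M @ [x, y, 1]^T.
# A quick numpy check of that relationship for the control points above:
#
#     M = cv2.getAffineTransform(pts1, pts2)  # shape (2, 3)
#     src_h = np.hstack([pts1, np.ones((3, 1), np.float32)])  # homogeneous coords
#     assert np.allclose(src_h @ M.T, pts2)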
| 652
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
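# Usage sketch (added; the output path is a placeholder): an OnnxConfig like
# the one above drives the transformers.onnx exporter, which reads the
# `inputs` mapping to declare the dynamic axes of the exported graph:
#
#     python -m transformers.onnx --model=albert-base-v2 onnx_out/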
| 132
|
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
a =logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        torch_dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(torch_dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization. "
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
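# Usage sketch (added, mirroring the accelerate documentation; the weights
# path is a placeholder):
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#     with init_empty_weights():
#         empty_model = MyModel()  # any nn.Module skeleton, weights on "meta"
#
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     quantized = load_and_quantize_model(
#         empty_model, bnb_config, weights_location="path/to/weights", device_map="auto"
#     )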
def get_quantized_model_device_map(model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n "
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
def has_4bit_bnb_layers(model):
    # Check if we have `bnb.nn.Linear4bit` layers inside the model
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
| 132
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False
    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    '''simple docstring'''

    def __init__(self, in_channels=3, out_channels=3, up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", norm_type="group", ):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = nn.Conv2d(
            in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1, )
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        temb_channels = in_channels if norm_type == "spatial" else None
        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default" if norm_type == "group" else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels, )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type, num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, prev_output_channel=None, add_upsample=not is_final_block, resnet_eps=1e-6, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=temb_channels, resnet_time_scale_shift=norm_type, )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
        self.gradient_checkpointing = False
    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)
        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False)
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False)
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds)
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)
        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
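
# Editorial note (not from the original file): `sample` is cast to the dtype of the
# up-block parameters above so decoding stays consistent when the mid block and the
# up blocks run in different precisions (e.g. float32 mid block, float16 up blocks).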
class VectorQuantizer(nn.Module):
    '''simple docstring'''

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy
        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
        self.remap = remap
        if self.remap is not None:
            self.register_buffer('used', torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices.")
        else:
            self.re_embed = n_e
        self.sane_index_shape = sane_index_shape
    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)
    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)
        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None
        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()
        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()
        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten
        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again
        # get quantized latent vectors
        z_q = self.embedding(indices)
        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()
        return z_q
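
# Editorial aside (not from the original file): a compact, hedged illustration of the
# nearest-neighbour lookup plus straight-through estimator implemented by the class
# above. `toy_codebook` and `quantize_st` are hypothetical names.
import torch


def quantize_st(z, codebook):
    # z: (N, D) latents, codebook: (K, D) embeddings
    idx = torch.argmin(torch.cdist(z, codebook), dim=1)  # index of nearest code
    z_q = codebook[idx]
    # forward uses z_q; backward passes gradients straight through to z
    return z + (z_q - z).detach()


toy_z = torch.randn(4, 8, requires_grad=True)
toy_codebook = torch.randn(16, 8)
quantize_st(toy_z, toy_codebook).sum().backward()  # grads reach toy_z despite argmin
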
class DiagonalGaussianDistribution(object):
    '''simple docstring'''

    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype)
    def sample(self, generator: Optional[torch.Generator] = None):
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype)
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar, dim=[1, 2, 3], )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
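
# Editorial aside (not from the original file): the two identities the class above
# relies on, written out for a single diagonal Gaussian. Names are hypothetical.
import torch

toy_mean = torch.zeros(3)
toy_logvar = torch.zeros(3)
# reparameterisation: x = mu + sigma * eps with eps ~ N(0, I)
toy_x = toy_mean + torch.exp(0.5 * toy_logvar) * torch.randn(3)
# closed-form KL(N(mu, sigma^2) || N(0, 1)) = 0.5 * sum(mu^2 + sigma^2 - 1 - log sigma^2)
toy_kl = 0.5 * torch.sum(toy_mean.pow(2) + toy_logvar.exp() - 1.0 - toy_logvar)
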
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    """simple docstring"""

    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))
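        # Worked example with the defaults above: num_patches = (30 // 2) ** 2 = 225,
        # so seq_length = ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91 visible tokens.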
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        '''simple docstring'''
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )
    def create_and_check_model(self, config, pixel_values, labels):
        '''simple docstring'''
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        '''simple docstring'''
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        '''simple docstring'''
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        '''simple docstring'''
        pass

    def test_model_common_attributes(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        '''simple docstring'''
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.")
    def test_determinism(self):
        '''simple docstring'''
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.")
    def test_save_load_fast_init_from_base(self):
        '''simple docstring'''
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.")
    def test_save_load_fast_init_to_base(self):
        '''simple docstring'''
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        '''simple docstring'''
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        '''simple docstring'''
        pass
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        '''simple docstring'''
        np.random.seed(2)
        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
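
# Editorial aside (not part of the test file): the integration test pins ViTMAE's random
# patch masking by passing an explicit `noise` array; patches with the smallest noise
# values are the ones kept visible. A hedged, numpy-only illustration (hypothetical names):
toy_noise = np.random.uniform(size=(1, 196))       # one value per patch
toy_len_keep = int(196 * (1 - 0.75))               # e.g. keep 25% at mask_ratio 0.75
toy_ids_shuffle = np.argsort(toy_noise, axis=1)    # ascending: small noise = keep
toy_ids_keep = toy_ids_shuffle[:, :toy_len_keep]   # indices of the visible patches
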
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_graphormer'] = [
        'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GraphormerForGraphClassification',
        'GraphormerModel',
        'GraphormerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
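
# Editorial aside (not part of the original module): the deferred-import idea behind
# _LazyModule, reduced to plain importlib. `lazy_attr` is a hypothetical helper.
import importlib


def lazy_attr(module_name, attr):
    # Import `module_name` only when the attribute is first requested.
    def load():
        return getattr(importlib.import_module(module_name), attr)

    return load


get_ceil = lazy_attr("math", "ceil")
print(get_ceil()(2.3))  # `math` is imported here, not at definition time -> prints 3
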
'''simple docstring'''
def solution(pence: int = 200) -> int:
    """simple docstring"""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
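
# Worked example for pence = 5: with coin 1 alone there is 1 way (five 1s); adding
# coin 2 contributes 1+1+1+2 and 1+2+2 (3 ways total); adding coin 5 contributes 5
# itself, giving 4 ways overall.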
if __name__ == "__main__":
assert solution(200) == 73682