#!/usr/bin/env python

# Usage:
# ./gen-card-allenai-wmt16.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'wmt16-en-de-dist-12-1': [28.3, 27.52],
'wmt16-en-de-dist-6-1': [27.4, 27.11],
'wmt16-en-de-12-1': [26.9, 25.75],
}
UpperCamelCase = F"""{src_lang}-{tgt_lang}"""
UpperCamelCase = F"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The scores are slightly below those reported in the paper, because the paper's authors don't use `sacrebleu` and measure the score on tokenized outputs, whereas the `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)

# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class KarrasVeOutput(BaseOutput):
    """Output class for the scheduler's step function."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic scheduler tailored to variance-expanding models (Karras et al. 2022, Algorithm 2)."""

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
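
# Demo (separate snippet, not part of the scheduler module above): a minimal
# sketch of the stochastic Heun sampling loop this scheduler implements,
# mirroring diffusers' KarrasVePipeline. The tiny untrained UNet2DModel is a
# stand-in assumption, so the output is meaningless noise; a real run would
# load a trained denoiser.
import torch
from diffusers import KarrasVeScheduler, UNet2DModel

unet = UNet2DModel(
    sample_size=32, in_channels=3, out_channels=3, layers_per_block=1,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
scheduler = KarrasVeScheduler()
scheduler.set_timesteps(50)

sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
with torch.no_grad():
    for t in scheduler.timesteps:
        sigma = scheduler.schedule[t]
        sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
        # 1. "churn": temporarily raise the noise level from sigma to sigma_hat
        sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
        # 2. Euler step from sigma_hat down to sigma_prev
        model_output = (sigma_hat / 2) * unet((sample_hat + 1) / 2, sigma_hat / 2).sample
        step_output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
        # 3. second-order correction, skipped on the final step
        if sigma_prev != 0:
            model_output = (sigma_prev / 2) * unet((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
            step_output = scheduler.step_correct(
                model_output, sigma_hat, sigma_prev, sample_hat,
                step_output.prev_sample, step_output.derivative,
            )
        sample = step_output.prev_sample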
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node

NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
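
# Illustration (separate snippet, not part of the test script above): on a
# 12-row map-style dataset split across world_size=4, split_dataset_by_node
# gives each rank a disjoint share of 3 rows.
from datasets import Dataset
from datasets.distributed import split_dataset_by_node

ds = Dataset.from_dict({"i": list(range(12))})
assert all(len(split_dataset_by_node(ds, rank=r, world_size=4)) == 3 for r in range(4))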
import tempfile

import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device

from ..test_pipelines_common import to_np


class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=1, block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32,
            attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
            thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0,
            prediction_type="epsilon", variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32,
            attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu",
            class_embed_type="timestep", mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu", time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
            thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0,
            prediction_type="epsilon", variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self._get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)

        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self._get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)

        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
        encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02,
        classifier_dropout=0.0, scale_embedding=False, use_cache=True,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True,
        decoder_start_token_id=2, forced_eos_token_id=2,
        use_prompt=False, prompt_length=100, prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
from timeit import timeit

test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome(s: str) -> bool:
    """Return True if s is a palindrome, otherwise False, using two pointers."""
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    """Return True if s is a palindrome, otherwise False."""
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    """Return True if s is a palindrome, otherwise False, recursively."""
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    """Return True if s is a palindrome, otherwise False, using slicing."""
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase = 100 ) -> str:
_lowerCAmelCase =n * (n + 1) * (2 * n + 1) / 6
_lowerCAmelCase =(n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =1
_lowerCAmelCase =3
_lowerCAmelCase =(32, 32)
_lowerCAmelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase )
return image
@property
def _lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_lowerCAmelCase =UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def _lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_lowerCAmelCase =AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def _lowerCAmelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_lowerCAmelCase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
return CLIPTextModel(__UpperCAmelCase )
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler,
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator,
            guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator,
            guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler,
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0,
            noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2,
            guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler,
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False,
    proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME, cache_dir=cache_dir,
        force_download=force_download, resume_download=resume_download, proxies=proxies,
        use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    r"""
    Generic image processor class that is instantiated as one of the image processor classes of the library when
    created with the [`AutoImageProcessor.from_pretrained`] class method. It cannot be instantiated directly.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new image processor class for a given config class."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
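
# Usage sketch (separate snippet, requires access to the Hugging Face Hub);
# the checkpoint id is just a well-known example, and any repo that ships an
# image processor or feature extractor config resolves the same way.
from transformers import AutoImageProcessor

image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
print(type(image_processor).__name__)  # ViTImageProcessor, resolved via the mapping above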
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, TensorType

logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1, n_positions=32 * 32, n_embd=512, n_layer=24, n_head=8, n_inner=None,
        activation_function="quick_gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1,
        layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True,
        tie_word_embeddings=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
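
# A small sketch (separate snippet) of how `attribute_map` lets the common
# config names resolve to the GPT-2-style ones; the tiny values are arbitrary
# assumptions for illustration.
from transformers import ImageGPTConfig

tiny = ImageGPTConfig(n_embd=64, n_layer=2, n_head=2, n_positions=16)
print(tiny.hidden_size, tiny.num_hidden_layers)  # 64 2, via attribute_map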
import os


def solution(filename: str = "matrix.txt") -> int:
    """
    Returns the minimal path sum from the top left to the bottom right of the
    matrix in `filename`, moving only right and down.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]

    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]


if __name__ == "__main__":
    print(f"{solution() = }")
from typing import List, Optional, Union

import numpy as np
import tensorflow as tf

from .utils import logging


logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """
    Deal with dynamic shape in tensorflow cleanly.
    """
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    """
    Stable wrapper for `tf.nn.softmax` that adds a tiny offset to the logits so the op behaves reliably under XLA.
    """
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # A simplified functional layernorm, designed to duplicate the behavior of
    # PyTorch's nn.functional.layer_norm when porting models.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon,
    )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    """
    Invert an attention mask (e.g., switches 0. and 1.).
    """
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    """
    `tf.gather`, on which TF embedding layers are based, won't check positive out-of-bound indices on GPU, returning
    zeros instead. This function adds a check against that dangerous silent behavior.
    """
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    """
    Saves attributes (data) of the specified name into the HDF5 group, chunking the array if it exceeds the HDF5
    object-header limit.
    """
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    """
    Loads attributes of the specified name from the HDF5 group, reassembling arrays that were chunked on save.
    """
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    """
    Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s.
    """

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
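
# A quick demonstration (separate snippet) of the shape helpers above on a
# concrete tensor; it assumes the module above is importable as
# `transformers.tf_utils` (its upstream location), and the shapes used are
# arbitrary.
import tensorflow as tf

from transformers.tf_utils import expand_1d, flatten, shape_list

x = tf.zeros((2, 3, 4))
print(shape_list(x))  # [2, 3, 4]
print(flatten(x, start_dim=1).shape)  # (2, 12)
print(expand_1d(tf.zeros((5,))).shape)  # (5, 1)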
def catalan_numbers(upper_limit: int) -> "list[int]":
    """
    Return a list of the Catalan numbers C(0) through C(upper_limit),
    computed with dynamic programming.
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
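
# Sanity check: the first six Catalan numbers are 1, 1, 2, 5, 14, 42.
assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]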
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
lowerCAmelCase__ = datasets.load_iris()
lowerCAmelCase__ = np.array(data['''data'''])
lowerCAmelCase__ = np.array(data['''target'''])
lowerCAmelCase__ = data['''target_names''']
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = train_test_split(X, y)
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return np.linalg.norm(np.array(_SCREAMING_SNAKE_CASE ) - np.array(_SCREAMING_SNAKE_CASE ) )
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=5 ):
"""simple docstring"""
UpperCamelCase = zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# List of distances of all points from the point to be classified
UpperCamelCase = []
for data_point in data:
UpperCamelCase = euclidean_distance(data_point[0] , _SCREAMING_SNAKE_CASE )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
UpperCamelCase = [i[1] for i in sorted(_SCREAMING_SNAKE_CASE )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
UpperCamelCase = Counter(_SCREAMING_SNAKE_CASE ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: int = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self , image , size , crop_pct = None , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["""shortest_edge"""] / crop_pct )
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["""height"""] / crop_pct )
                else:
                    scale_size = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
            else:
                raise ValueError("""Invalid size for resize: {}""".format(size ) )
            output_size = get_resize_output_image_size(image , size=scale_size , default_to_square=False )
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image , size=size["""shortest_edge"""] , default_to_square=False )
            elif "height" in size and "width" in size:
                output_size = (size["""height"""], size["""width"""])
            else:
                raise ValueError("""Invalid size for resize: {}""".format(size ) )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , crop_pct = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_center_crop and crop_pct is None:
            raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , crop_pct=crop_pct , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
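# A quick usage sketch, assuming the class above (its name `_A` is an artifact of this
# dump) behaves like a shortest-edge/center-crop image processor with the defaults set
# in __init__ (shortest_edge=224, crop_pct=0.9, 224x224 crop):
#
#     import numpy as np
#     processor = _A()
#     image = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)  # dummy H x W x C image
#     batch = processor.preprocess(image, return_tensors="np")
#     print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)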
| 356
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation :
    languages : List[str]
    id : Optional[str] = None
    # Automatically constructed
    dtype : ClassVar[str] = "dict"
    pa_type : ClassVar[Any] = None
    _type : str = field(default="Translation" , init=False , repr=False )
def __call__( self ) -> Any:
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
    def flatten( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class TranslationVariableLanguages :
    languages : Optional[List] = None
    num_languages : Optional[int] = None
    id : Optional[str] = None
    # Automatically constructed
    dtype : ClassVar[str] = "dict"
    pa_type : ClassVar[Any] = None
    _type : str = field(default="TranslationVariableLanguages" , init=False , repr=False )
    def __post_init__( self ) -> None:
        '''simple docstring'''
        self.languages = sorted(set(self.languages ) ) if self.languages else None
        self.num_languages = len(self.languages ) if self.languages else None
def __call__( self ) -> Optional[Any]:
'''simple docstring'''
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
    def encode_example( self , translation_dict ) -> Any:
        '''simple docstring'''
        lang_set = set(self.languages )
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({", ".join(lang_set )}).' )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        languages , translations = zip(*sorted(translation_tuples ) )
        return {"language": languages, "translation": translations}
    def flatten( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
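# A short usage sketch of the feature above (values chosen purely for illustration):
#
#     feat = TranslationVariableLanguages(languages=["de", "en", "fr"])
#     feat.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
#     # {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}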
| 16
| 0
|
"""simple docstring"""
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = """Usage of script: script_name <size_of_canvas:int>"""
choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int ) -> list[list[bool]]:
    '''simple docstring'''
    canvas = [[False for i in range(size )] for j in range(size )]
    return canvas
def seed(canvas: list[list[bool]] ) -> None:
    '''simple docstring'''
    for i, row in enumerate(canvas ):
        for j, _ in enumerate(row ):
            canvas[i][j] = bool(random.getrandbits(1 ) )
def run(canvas: list[list[bool]] ) -> list[list[bool]]:
    '''simple docstring'''
    current_canvas = np.array(canvas )
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0] ) )
    for r, row in enumerate(current_canvas ):
        for c, pt in enumerate(row ):
            next_gen_canvas[r][c] = __judge_point(
                pt , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas
def __judge_point(pt: bool , neighbours: list[list[bool]] ) -> bool:
    '''simple docstring'''
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
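# A minimal sanity check of the update rule implemented by __judge_point above
# (neighbourhood grids chosen for illustration):
#
#     survives = [[False, False, False], [False, True, True], [False, True, False]]
#     __judge_point(True, survives)   # True: two live neighbours -> survival
#     birth = [[True, True, True], [False, False, False], [False, False, False]]
#     __judge_point(False, birth)     # True: exactly three live neighbours -> birth
#     lonely = [[False, False, False], [False, True, False], [False, False, False]]
#     __judge_point(True, lonely)     # False: underpopulation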
| 197
|
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_CITATION = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        """simple docstring"""
        if return_pvalue:
            results = pearsonr(predictions , references )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(predictions , references )[0] )}
| 197
| 1
|
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
    def test_swish( self ):
        """simple docstring"""
        act = get_activation("""swish""" )
        self.assertIsInstance(act , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_silu( self ):
        """simple docstring"""
        act = get_activation("""silu""" )
        self.assertIsInstance(act , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_mish( self ):
        """simple docstring"""
        act = get_activation("""mish""" )
        self.assertIsInstance(act , nn.Mish )
        self.assertEqual(act(torch.tensor(-200 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_gelu( self ):
        """simple docstring"""
        act = get_activation("""gelu""" )
        self.assertIsInstance(act , nn.GELU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
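# For a quick manual check outside the test suite, the same activations can be exercised
# directly (a sketch; get_activation is the helper imported above):
#
#     x = torch.tensor([-100.0, -1.0, 0.0, 20.0])
#     for name in ("swish", "silu", "mish", "gelu"):
#         print(name, get_activation(name)(x))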
| 361
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowercase : Union[str, Any] =16
_lowercase : Dict =32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 , model_name: str = "bert-base-cased"):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("""glue""" , """mrpc""")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=False)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="""max_length""" , max_length=128 , return_tensors="""pt""")
        return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""")
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size)
return train_dataloader, eval_dataloader
def training_function(config , args):
    """simple docstring"""
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""])
    seed = int(config["""seed"""])
    batch_size = int(config["""batch_size"""])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size , model_name)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True)
# Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            """gradient_accumulation_steps"""
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load("""glue""" , """mrpc""")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch , num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions , references = accelerator.gather(
                (predictions, batch["""labels"""]))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''' , eval_metric)
        performance_metric[F'''epoch-{epoch}'''] = eval_metric["""accuracy"""]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , """all_results.json""") , """w""") as f:
            json.dump(performance_metric , f)
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""")
    parser.add_argument(
        """--model_name_or_path""" , type=str , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=False , )
    parser.add_argument(
        """--output_dir""" , type=str , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
    parser.add_argument(
        """--performance_lower_bound""" , type=float , default=None , help="""Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.""" , )
    parser.add_argument(
        """--num_epochs""" , type=int , default=3 , help="""Number of train epochs.""" , )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
    training_function(config , args)
if __name__ == "__main__":
    main()
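# The core gradient-accumulation pattern the training loop above relies on, distilled
# into a minimal stand-alone sketch (toy model and data, purely illustrative):
#
#     import torch
#     from torch import nn
#     from torch.optim import AdamW
#
#     model = nn.Linear(4, 2)
#     optimizer = AdamW(model.parameters(), lr=2e-5)
#     gradient_accumulation_steps = 4
#     for step in range(16):
#         x, y = torch.randn(8, 4), torch.randint(0, 2, (8,))
#         loss = nn.functional.cross_entropy(model(x), y)
#         (loss / gradient_accumulation_steps).backward()  # scale so accumulated grads average
#         if step % gradient_accumulation_steps == 0:
#             optimizer.step()
#             optimizer.zero_grad()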
| 266
| 0
|
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def get_module_path(test_file ):
    """simple docstring"""
    components = test_file.split(os.path.sep )
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
            f'{test_file} instead.' )
    test_fn = components[-1]
    if not test_fn.endswith('py' ):
        raise ValueError(f'`test_file` should be a python file. Got {test_fn} instead.' )
    if not test_fn.startswith('test_modeling_' ):
        raise ValueError(
            f'`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.' )
    components = components[:-1] + [test_fn.replace('.py' , '' )]
    test_module_path = '.'.join(components )
    return test_module_path
def get_test_module(test_file ):
    """simple docstring"""
    test_module_path = get_module_path(test_file )
    test_module = importlib.import_module(test_module_path )
    return test_module
def get_tester_classes(test_file ):
    """simple docstring"""
    tester_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        if attr.endswith('ModelTester' ):
            tester_classes.append(getattr(test_module , attr ) )
    # sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )
def get_test_classes(test_file ):
    """simple docstring"""
    test_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        attr_value = getattr(test_module , attr )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value , 'all_model_classes' , [] )
        if len(model_classes ) > 0:
            test_classes.append(attr_value )
    # sort with class names
    return sorted(test_classes , key=lambda x : x.__name__ )
def get_model_classes(test_file ):
    """simple docstring"""
    test_classes = get_test_classes(test_file )
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )
    # sort with class names
    return sorted(model_classes , key=lambda x : x.__name__ )
def get_model_tester_from_test_class(test_class ):
    """simple docstring"""
    test = test_class()
    if hasattr(test , 'setUp' ):
        test.setUp()
    model_tester = None
    if hasattr(test , 'model_tester' ):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file , model_class ):
    """simple docstring"""
    test_classes = get_test_classes(test_file )
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class )
    # sort with class names
    return sorted(target_test_classes , key=lambda x : x.__name__ )
def get_tester_classes_for_model(test_file , model_class ):
    """simple docstring"""
    test_classes = get_test_classes_for_model(test_file , model_class )
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class )
        if tester_class is not None:
            tester_classes.append(tester_class )
    # sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )
def get_test_to_tester_mapping(test_file ):
    """simple docstring"""
    test_classes = get_test_classes(test_file )
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class ) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping(test_file ):
    """simple docstring"""
    model_classes = get_model_classes(test_file )
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file ):
    """simple docstring"""
    model_classes = get_model_classes(test_file )
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o ):
    """simple docstring"""
    if isinstance(o , str ):
        return o
    elif isinstance(o , type ):
        return o.__name__
    elif isinstance(o , (list, tuple) ):
        return [to_json(x ) for x in o]
    elif isinstance(o , dict ):
        return {to_json(k ): to_json(v ) for k, v in o.items()}
    else:
        return o
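# Usage sketch; the test file path below is illustrative and assumes a transformers
# checkout as the working directory:
#
#     test_file = "tests/models/bert/test_modeling_bert.py"
#     get_module_path(test_file)              # "tests.models.bert.test_modeling_bert"
#     get_model_to_tester_mapping(test_file)  # e.g. {BertModel: [BertModelTester], ...}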
| 331
|
'''simple docstring'''
def one_pence():
    """simple docstring"""
    return 1
def two_pence(x: int ):
    """simple docstring"""
    return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def five_pence(x: int ):
    """simple docstring"""
    return 0 if x < 0 else five_pence(x - 5 ) + two_pence(x )
def ten_pence(x: int ):
    """simple docstring"""
    return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(x )
def twenty_pence(x: int ):
    """simple docstring"""
    return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(x )
def fifty_pence(x: int ):
    """simple docstring"""
    return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(x )
def one_pound(x: int ):
    """simple docstring"""
    return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(x )
def two_pound(x: int ):
    """simple docstring"""
    return 0 if x < 0 else two_pound(x - 200 ) + one_pound(x )
def solution(x: int = 200 ):
    """simple docstring"""
    return two_pound(x )
if __name__ == "__main__":
    print(solution(int(input().strip())))
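# An equivalent bottom-up sketch of the same coin-partition count (Project Euler 31),
# which avoids the deep recursion above:

def count_ways(target=200, coins=(1, 2, 5, 10, 20, 50, 100, 200)):
    # ways[v] = number of ways to form value v with the coins processed so far
    ways = [1] + [0] * target
    for coin in coins:
        for v in range(coin, target + 1):
            ways[v] += ways[v - coin]
    return ways[target]

# count_ways() == 73682 == solution()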
| 331
| 1
|
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('''torch'''))
def compare_versions(library_or_version: Union[str, Version] , operation: str , requirement_version: str ):
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}" )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation(library_or_version , parse(requirement_version ) )
def is_torch_version(operation: str , version: str ):
    return compare_versions(torch_version , operation , version )
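# Usage sketch: gating a code path on the installed torch version.
#
#     if is_torch_version(">=", "1.12.0"):
#         ...  # use features that need torch >= 1.12
#     compare_versions("numpy", ">=", "1.20.0")  # works for any installed library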
| 335
|
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position , value , l_send , r_send , lr_cv , rr_cv , result_pipe ):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value , temp )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value , temp )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value )
def odd_even_transposition(arr ):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1 , len(arr ) - 1 ):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process , args=(
                len(arr ) - 1,
                arr[len(arr ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr ) - 1],
            ) , ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(arr ) ):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10 , 0 , -1 ) )
    print('Initial List' )
    print(*arr )
    arr = odd_even_transposition(arr )
    print('Sorted List\n' )
    print(*arr )
if __name__ == "__main__":
    main()
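# For comparison, the same odd-even transposition idea without processes; a sequential
# sketch (n phases are always enough to sort n elements):

def odd_even_sort(arr):
    a = list(arr)
    for phase in range(len(a)):
        for i in range(phase % 2, len(a) - 1, 2):  # alternate even/odd index pairs
            if a[i] > a[i + 1]:
                a[i], a[i + 1] = a[i + 1], a[i]
    return a

# odd_even_sort([10, 9, 8, 7, 6, 5, 4, 3, 2, 1]) -> [1, 2, ..., 10]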
| 335
| 1
|
import os
def __lowercase ( _UpperCamelCase = "matrix.txt" ) ->int:
"""simple docstring"""
with open(os.path.join(os.path.dirname(_UpperCamelCase ), _UpperCamelCase ) ) as in_file:
lowercase : List[Any] = in_file.read()
lowercase : str = [[int(_UpperCamelCase ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
lowercase : Union[str, Any] = [[0 for cell in row] for row in grid]
lowercase : Any = len(grid[0] )
lowercase : Dict = [[0 for i in range(_UpperCamelCase )] for j in range(_UpperCamelCase )]
lowercase : str = grid[0][0]
for i in range(1, _UpperCamelCase ):
lowercase : str = grid[0][i] + dp[0][i - 1]
for i in range(1, _UpperCamelCase ):
lowercase : List[Any] = grid[i][0] + dp[i - 1][0]
for i in range(1, _UpperCamelCase ):
for j in range(1, _UpperCamelCase ):
lowercase : Tuple = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
    print(F'''{solution() = }''')
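# A tiny worked instance of the same right/down DP, inlined so it does not need
# matrix.txt (grid values chosen for illustration):
#
#     grid = [[131, 673], [201, 96]]
#     # paths: 131+673+96 = 900 (right then down) vs 131+201+96 = 428 (down then right)
#     # the DP picks the cheaper predecessor at each cell, so the answer is 428.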
| 337
|
from __future__ import annotations
def average(nums ) ->float:
    """simple docstring"""
    if not nums:
        raise ValueError('''List is empty''' )
    return sum(nums ) / len(nums )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 337
| 1
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_snake_case = logging.get_logger(__name__)
def rename_key(key ):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex , key )
    for pat in pats:
        key = key.replace(pat , "_".join(pat.split("." ) ) )
    return key
def rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict ):
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model , init_key=42 ):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key ) )
    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split("." ) )
        # Correctly rename weight parameters
        flax_key , flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict )
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
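# The key layout rules above in miniature: PyTorch nn.Linear stores weight as
# (out, in) while a Flax Dense kernel is (in, out); Conv2d goes from
# (out, in, kh, kw) to (kh, kw, in, out). A quick numpy check:
#
#     import numpy as np
#     np.zeros((8, 4)).T.shape                             # (4, 8)        Dense kernel
#     np.zeros((16, 3, 5, 5)).transpose(2, 3, 1, 0).shape  # (5, 5, 3, 16) Conv kernel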
| 201
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_snake_case = logging.getLogger(__name__)
@dataclass
class ModelArguments :
    model_name_or_path : str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name : Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name : Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir : Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    freeze_encoder : bool = field(default=False , metadata={'help': 'Whether to freeze the encoder.'} )
    freeze_embeds : bool = field(default=False , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class DataTrainingArguments :
    data_dir : str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
    task : Optional[str] = field(
        default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
    max_source_length : Optional[int] = field(
        default=1_024 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    max_target_length : Optional[int] = field(
        default=128 , metadata={
            'help': (
                'The maximum total sequence length for target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    val_max_target_length : Optional[int] = field(
        default=142 , metadata={
            'help': (
                'The maximum total sequence length for validation target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded. '
                'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
                'during ``evaluate`` and ``predict``.'
            )
        } , )
    test_max_target_length : Optional[int] = field(
        default=142 , metadata={
            'help': (
                'The maximum total sequence length for test target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    n_train : Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
    n_val : Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
    n_test : Optional[int] = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
    src_lang : Optional[str] = field(default=None , metadata={'help': 'Source language id for translation.'} )
    tgt_lang : Optional[str] = field(default=None , metadata={'help': 'Target language id for translation.'} )
    eval_beams : Optional[int] = field(default=None , metadata={'help': '# num_beams to use for evaluation.'} )
    ignore_pad_token_for_loss : bool = field(
        default=True , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def handle_metrics(split , metrics , output_dir ):
    logger.info(f'''***** {split} metrics *****''' )
    for key in sorted(metrics.keys() ):
        logger.info(f''' {key} = {metrics[key]}''' )
    save_json(metrics , os.path.join(output_dir , f'''{split}_results.json''' ) )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , __magic_name__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
# use task specific params
    use_task_specific_params(model , data_args.task )
# set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
# set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    if model_args.freeze_embeds:
        freeze_embeds(model )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
    dataset_class = Seq2SeqDataset
# Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_predict
        else None
    )
# Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=Seq2SeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("train" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate(metric_key_prefix="val" )
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics("val" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
if training_args.do_predict:
logger.info("*** Predict ***" )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix="test" )
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"] , 4 )
            handle_metrics("test" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , "test_generations.txt" ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
| 201
| 1
|
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.17.0.dev0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
__lowercase : str = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments :
    dataset_name : Optional[str] = field(
        default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
    dataset_config_name : Optional[str] = field(
        default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , )
    max_seq_length : int = field(
        default=1024 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache : bool = field(
        default=False , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    pad_to_max_length : bool = field(
        default=False , metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        } , )
    max_train_samples : Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples : Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    max_predict_samples : Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        } , )
    train_file : Optional[str] = field(
        default=None , metadata={"help": "A csv or a json file containing the training data."} )
    validation_file : Optional[str] = field(
        default=None , metadata={"help": "A csv or a json file containing the validation data."} )
    test_file : Optional[str] = field(default=None , metadata={"help": "A csv or a json file containing the test data."} )
    def __post_init__( self ):
        '''simple docstring'''
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' )
        else:
            train_extension = self.train_file.split('.' )[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split('.' )[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments :
    model_name_or_path : str = field(
        default=None , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name : Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name : Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir : Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    use_fast_tokenizer : bool = field(
        default=True , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
    model_revision : str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    use_auth_token : bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__a : Any = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
__a : str = data_args.train_file.split('.' )[-1]
__a : Tuple = data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
__a : Dict = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
__a : Union[str, Any] = load_dataset('csv' , data_files=__lowerCamelCase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
__a : Optional[Any] = load_dataset('json' , data_files=__lowerCamelCase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__a : int = raw_datasets['''train'''].features['''label'''].names
__a : List[Any] = len(__lowerCamelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__a : int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
__a : List[Any] = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__lowerCamelCase , )
__a : Any = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
__a : str = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__a : List[Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__a : Any = {'''Refused''': 0, '''Entailed''': 1}
__a : str = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__a : str = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(_SCREAMING_SNAKE_CASE : Optional[int] ):
# Tokenize the texts
def _convert_table_text_to_pandas(_SCREAMING_SNAKE_CASE : Optional[Any] ):
__a : Dict = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
__a : List[Any] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
__a : Tuple = examples['''statement''']
__a : str = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
__a : Dict = tokenizer(__lowerCamelCase , __lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase )
__a : List[Any] = examples['''label''']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
__a : List[Any] = raw_datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
__a : str = raw_datasets['''train''']
if data_args.max_train_samples is not None:
__a : Union[str, Any] = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
__a : Any = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
__a : Optional[int] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
__a : Optional[Any] = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
__a : str = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__lowerCamelCase ) ) , 3 ):
logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_SCREAMING_SNAKE_CASE : Union[str, Any] ):
__a : Union[str, Any] = p.predictions[0] if isinstance(p.predictions , __lowerCamelCase ) else p.predictions
__a : Dict = np.argmax(__lowerCamelCase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__a : List[str] = default_data_collator
elif training_args.fpaa:
__a : Any = DataCollatorWithPadding(__lowerCamelCase , pad_to_multiple_of=8 )
else:
__a : List[Any] = None
# Initialize our Trainer
__a : Union[str, Any] = Trainer(
model=__lowerCamelCase , args=__lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__lowerCamelCase , tokenizer=__lowerCamelCase , data_collator=__lowerCamelCase , )
# Training
if training_args.do_train:
__a : Dict = None
if training_args.resume_from_checkpoint is not None:
__a : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__a : int = last_checkpoint
__a : List[str] = trainer.train(resume_from_checkpoint=__lowerCamelCase )
__a : List[str] = train_result.metrics
__a : List[str] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__lowerCamelCase )
)
__a : Any = min(__lowerCamelCase , len(__lowerCamelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , __lowerCamelCase )
trainer.save_metrics('train' , __lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__a : Union[str, Any] = trainer.evaluate(eval_dataset=__lowerCamelCase )
__a : Union[str, Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__lowerCamelCase )
__a : Tuple = min(__lowerCamelCase , len(__lowerCamelCase ) )
trainer.log_metrics('eval' , __lowerCamelCase )
trainer.save_metrics('eval' , __lowerCamelCase )
if training_args.do_predict:
logger.info('*** Predict ***' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
__a : Tuple = predict_dataset.remove_columns('label' )
__a : str = trainer.predict(__lowerCamelCase , metric_key_prefix='predict' ).predictions
__a : Tuple = np.argmax(__lowerCamelCase , axis=1 )
__a : List[Any] = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(__lowerCamelCase ):
__a : Optional[Any] = label_list[item]
writer.write(F"""{index}\t{item}\n""" )
__a : Dict = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowerCamelCase )
else:
trainer.create_model_card(**__lowerCamelCase )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
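
# A hedged usage sketch for the script above. The script file name, checkpoint
# and dataset identifiers are illustrative assumptions; the flags map to the
# ModelArguments/DataTrainingArguments/TrainingArguments fields parsed in main():
#
#   python run_tabfact_with_tapex.py \
#       --model_name_or_path microsoft/tapex-base \
#       --dataset_name tab_fact --dataset_config_name tab_fact \
#       --do_train --do_eval \
#       --output_dir ./tapex-tabfact-output \
#       --per_device_train_batch_size 8 \
#       --num_train_epochs 3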
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class Swinv2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return Swinv2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = Swinv2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = Swinv2ForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = Swinv2ForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = Swinv2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = Swinv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37)
    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
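
# A minimal inference sketch mirroring the integration test above; the
# checkpoint name comes from the test, the rest is standard transformers usage
# (guarded because it downloads the model from the Hub):
if __name__ == "__main__":
    import requests
    from PIL import Image

    from transformers import AutoImageProcessor, Swinv2ForImageClassification

    processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
    model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[logits.argmax(-1).item()])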
lowerCamelCase_ = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase_ = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase_ = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
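
# A hedged sketch of how the placeholder map above is typically consumed when
# doc code samples are turned into notebook cells (the substitution loop is
# illustrative, not the actual doc-builder implementation):
if __name__ == "__main__":
    snippet = "model = {model_class}.from_pretrained(checkpoint)"
    for placeholder, stand_in in black_avoid_patterns.items():
        snippet = snippet.replace(placeholder, stand_in)
    print(snippet)  # model = FakeModelClass.from_pretrained(checkpoint)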
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS: every edge weight must be 0 or 1."""

    def __init__(self, size: int) -> None:
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the edges leaving the given vertex."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS: a deque replaces the priority queue of Dijkstra's algorithm.
        0-weight edges extend the current distance layer (appendleft); 1-weight
        edges start the next one (append)."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
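
# A small usage example for the 0-1 BFS above: the direct edge costs 1, but a
# two-step detour over 0-weight edges yields a shorter (zero-cost) path.
if __name__ == "__main__":
    g = AdjacencyList(3)
    g.add_edge(0, 1, 1)  # direct edge, weight 1
    g.add_edge(0, 2, 0)  # detour, weight 0
    g.add_edge(2, 1, 0)  # detour, weight 0
    assert g.get_shortest_path(0, 1) == 0  # the zero-weight detour wins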
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Check whether `n` uses each of the digits 1-9 exactly once."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(f'''{solution() = }''')
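
# Worked example from the Project Euler #38 statement: concatenating 192 with
# 192*2=384 and 192*3=576 gives 192384576, which is 1-9 pandigital.
if __name__ == "__main__":
    assert is_9_pandigital(192384576)
    assert not is_9_pandigital(192384575)  # repeated digit 5, missing 6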
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
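
# A hedged sketch of running the benchmark outside the test suite; the tiny
# checkpoint is the one the tests use, and the run downloads it from the Hub:
if __name__ == "__main__":
    benchmark_args = PyTorchBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        training=False,
        inference=True,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    print(PyTorchBenchmark(benchmark_args).run().time_inference_result)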
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
"compression_format, is_archive", [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
], )
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"compression_format, is_archive", [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
], )
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
"insecure_tar_file, error_log", [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")], )
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # `zipfile.is_zipfile` reports this PNG as a zip; our magic-number check should not.
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
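
# A self-contained sketch of the `Extractor` API exercised above; the two calls
# (`infer_extractor_format` and `extract`) are exactly the ones used in
# `test_extractor`, only the gzip payload here is made up:
if __name__ == "__main__":
    import gzip
    import tempfile
    from pathlib import Path

    from datasets.utils.extract import Extractor

    tmp = Path(tempfile.mkdtemp())
    archive = tmp / "hello.txt.gz"
    with gzip.open(archive, "wb") as f:
        f.write(b"hello world")

    extractor_format = Extractor.infer_extractor_format(archive)  # "gzip"
    Extractor.extract(archive, tmp / "hello.txt", extractor_format)
    print((tmp / "hello.txt").read_text(encoding="utf-8"))  # hello world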
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
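
# Example invocation of the conversion script above (the script file name and
# output directory are illustrative; --deit_name defaults to the checkpoint
# shown in the parser):
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224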
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
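
# Example invocation of the FLAVA conversion script above (the script file
# name and all paths are illustrative placeholders):
#
#   python convert_flava_original_pytorch_to_hf.py \
#       --checkpoint_path ./flava_full.ckpt \
#       --codebook_path ./flava_codebook.pt \
#       --pytorch_dump_folder_path ./flava-full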
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol2 import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
    ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
    ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
    ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
    ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
    """Generate a random hand and the expected outcome against another random hand."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))
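
# A quick property check (an illustrative addition, not part of the original
# test suite): every label produced by the random generator must be one of the
# three legal outcomes.
def test_generate_random_hand_labels():
    for _, _, expected in generate_random_hands(number_of_hands=10):
        assert expected in ("Loss", "Tie", "Win")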
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # A five-high straight must not sort above a six-high straight.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Repeated evaluation must not mutate the hand's cached card values.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem 54 of Project Euler: count how many hands player one wins.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 331
| 0
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa: import torch at initialization

    def _consolidate(self, column):
        import torch

        # Stack a list of tensors into one tensor when shapes and dtypes agree.
        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
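

if __name__ == "__main__":
    # Minimal usage sketch (an illustration, not part of the original module).
    # In practice this formatter is selected through `Dataset.with_format("torch")`;
    # assumes the `datasets` and `torch` packages are installed.
    from datasets import Dataset

    ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("torch")
    print(ds[0]["x"])  # tensor([1, 2])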
| 361
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    """A training module for NER. See BaseTransformer for the core options."""

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features."
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode, batch_size, shuffle=False) -> DataLoader:
        "Load datasets. Called after prepare_data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids),
            batch_size=batch_size,
        )

    def validation_step(self, batch, batch_nb):
        """Compute validation loss and predictions."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        "Evaluation called for both val and test."
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)

        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
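
# Example invocation (a sketch; paths and most flags are placeholders, and the
# full argument set comes from `lightning_base.add_generic_args`):
#   python run_ner.py --data_dir ./data --labels ./data/labels.txt \
#       --model_name_or_path bert-base-multilingual-cased --output_dir ./out \
#       --max_seq_length 128 --do_train --do_predict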
| 105
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
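

if __name__ == "__main__":
    # Quick sanity check (an illustration, not part of the original module):
    # build a default config and inspect the ONNX input spec; no weights needed.
    config = ViTConfig()
    onnx_config = ViTOnnxConfig(config)
    print(config.hidden_size)        # 768
    print(dict(onnx_config.inputs))  # {'pixel_values': {0: 'batch', ...}}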
| 201
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
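

if __name__ == "__main__":
    # Illustration only: the constructor validates depth_multiplier, so a
    # non-positive value raises immediately.
    try:
        MobileNetV2Config(depth_multiplier=0.0)
    except ValueError as err:
        print(err)  # depth_multiplier must be greater than zero.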
| 201
| 1
|
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in the 1000-digit number n that have
    the greatest product, and return that product."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(f"""{solution() = }""")
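
# Sanity note (an illustration): for the Project Euler #8 input above, the
# expected maximum product of thirteen adjacent digits is 23514624000.
#   >>> solution()
#   23514624000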
| 359
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
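
# Illustration only: with the `_LazyModule` replacement above, submodules load
# on first attribute access, e.g.
#   from transformers.models.mra import MraConfig  # cheap, config only
#   from transformers.models.mra import MraModel   # triggers the torch-backed import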
| 297
| 0
|
_lowerCamelCase : Tuple = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_lowerCamelCase : Tuple = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_lowerCamelCase : Any = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 14
|
def nor_gate(input_1: int, input_2: int) -> int:
    """NOR gate: the output is 1 only when both inputs are 0."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    """Print the truth table of the NOR gate."""
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|       0 |       0 |      {nor_gate(0, 0)} |")
    print(f"|       0 |       1 |      {nor_gate(0, 1)} |")
    print(f"|       1 |       0 |      {nor_gate(1, 0)} |")
    print(f"|       1 |       1 |      {nor_gate(1, 1)} |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
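

# Exhaustive property check (an illustrative addition): NOR is the negation of
# OR on binary inputs.
def test_nor_gate() -> None:
    for a in (0, 1):
        for b in (0, 1):
            assert nor_gate(a, b) == int(not (a or b))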
| 14
| 1
|
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """
    Metaclass that registers marked methods as key handlers on the class.
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """
        Finds and returns the selected character if it exists in the handler.
        """
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the class to the key handler."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
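
# Usage sketch (illustration only; `Menu` is a hypothetical class): rebuilding
# a class through `register` turns its decorated methods into key handlers.
#
#   @register
#   class Menu:
#       @mark("j")
#       def move_down(self):
#           ...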
| 369
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn,
            norm_num_groups=norm_num_groups, double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn,
            norm_num_groups=norm_num_groups, norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
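

if __name__ == "__main__":
    # Round-trip sketch (illustration only): encode an image-shaped tensor and
    # decode it back through the quantization bottleneck. The default config
    # keeps the spatial size, so shapes match end to end.
    model = VQModel()
    x = torch.randn(1, 3, 32, 32)
    latents = model.encode(x).latents
    print(latents.shape)                       # torch.Size([1, 3, 32, 32])
    print(model.decode(latents).sample.shape)  # torch.Size([1, 3, 32, 32])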
| 39
| 0
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride,
            padding=kernel_size // 2, groups=groups, bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """
    Shortcut used to project the residual features to the correct size and, if needed, downsample with `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, hidden_state) -> Tensor:
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """
    Squeeze-and-Excitation layer (SE): channel attention from globally pooled features.
    """

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    """
    RegNet's X layer: a residual block of 1x1 -> grouped 3x3 -> 1x1 convolutions.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    """
    RegNet's Y layer: an X layer with Squeeze-and-Excitation.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """
    A RegNet stage composed of stacked layers.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
REGNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once from the labels, then pick the loss.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
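

if __name__ == "__main__":
    # Smoke-test sketch (illustration only): a deliberately tiny, randomly
    # initialized RegNet; the shapes below assume this toy config.
    cfg = RegNetConfig(embedding_size=8, hidden_sizes=[8, 16], depths=[1, 1])
    model = RegNetModel(cfg)
    out = model(torch.randn(1, 3, 64, 64))
    print(out.pooler_output.shape)  # torch.Size([1, 16, 1, 1])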
| 143
|
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def layer_name_mapping(key, file):
    """Map Megatron-DeepSpeed parameter names to transformers names."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key


def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8


def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
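
# Example invocation (a sketch; paths are placeholders and the script file name
# is assumed):
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /path/to/megatron_ckpt \
#       --pytorch_dump_folder_path ./bloom-converted --shard_model --pretraining_tp 4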
| 143
| 1
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
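
# Running just this test class (a sketch; the exact path inside the diffusers
# repo may differ):
#   pytest -k "IFInpaintingSuperResolutionPipelineFastTests" tests/pipelines/deepfloyd_if/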
| 353
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
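# --- Illustrative aside (ours, not part of transformers) --------------------
# A self-contained sketch of the fairseq offset trick used by the tokenizer
# above: control tokens own ids 0-3 on the fairseq side, and every other
# token is its sentencepiece id shifted by `fairseq_offset`.
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1

def token_to_id(token: str, spm_piece_to_id: dict) -> int:
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = spm_piece_to_id.get(token, 0)
    # spm id 0 is <unk>, so fall back to the fairseq unk id
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

# Toy spm vocab: "," sits at spm id 3, so it maps to fairseq id 3 + 1 = 4.
toy_spm = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3}
assert token_to_id(",", toy_spm) == 4
assert token_to_id("never-seen", toy_spm) == 3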
| 264
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
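# --- Illustrative aside (ours, not part of transformers) --------------------
# `_LazyModule` above defers heavy imports until an attribute is first
# accessed. A minimal sketch of the same idea using PEP 562 module-level
# `__getattr__`, with the stdlib `math` module standing in for a submodule:
import importlib

_LAZY_ATTRS = {"sqrt": "math"}

def __getattr__(name):
    # Import the backing module only when the attribute is first touched.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")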
| 30
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class __UpperCamelCase :
@staticmethod
def __a ( *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
pass
def _SCREAMING_SNAKE_CASE ( _lowercase : Tuple ) ->Dict:
'''simple docstring'''
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
a : Optional[Any] = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
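# --- Illustrative aside (ours) -----------------------------------------------
# Shape of the `word_boxes` argument exercised above: `apply_tesseract` yields
# parallel lists of words and normalized boxes, and `list(zip(*...))` pairs
# them into (word, box) tuples.
words = ["invoice", "number:", "us-001"]
boxes = [[10, 10, 120, 30], [130, 10, 220, 30], [230, 10, 320, 30]]
word_boxes = list(zip(words, boxes))
assert word_boxes[2] == ("us-001", [230, 10, 320, 30])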
| 105
| 0
|
import enum
import warnings

from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf


class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix text used to give Transformer-XL and XLNet more state on short prompts.
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
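# --- Illustrative aside (ours, not the pipeline's API) -----------------------
# A minimal sketch of the "hole" strategy implemented in `preprocess` above:
# when prompt + requested new tokens would overflow the model context, keep
# only the rightmost slice of the prompt so `max_new_tokens` positions stay free.
import torch

def truncate_for_hole(input_ids: torch.Tensor, max_new_tokens: int, model_max_length: int) -> torch.Tensor:
    cur_len = input_ids.shape[-1]
    if cur_len + max_new_tokens <= model_max_length:
        return input_ids  # everything fits, nothing to do
    keep_length = model_max_length - max_new_tokens
    if keep_length <= 0:
        raise ValueError("requested more new tokens than the model's max length")
    return input_ids[:, -keep_length:]

prompt = torch.arange(10).unsqueeze(0)  # a fake 10-token prompt
out = truncate_for_hole(prompt, max_new_tokens=4, model_max_length=8)
assert out.shape == (1, 4) and out[0, 0].item() == 6  # only the last 4 tokens survive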
| 276
|
import argparse

import torch

from transformers import BertForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings: copied verbatim from the teacher.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    # Transformer layers: every selected teacher layer fills the next student slot.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]

            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]

            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
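# --- Illustrative aside (ours) -----------------------------------------------
# The layer-selection idea above in isolation: six teacher layers are copied
# into consecutive student slots, so student layer i receives teacher layer
# teacher_layers[i].
teacher_layers = [0, 2, 4, 7, 9, 11]
layer_map = dict(enumerate(teacher_layers))
assert layer_map == {0: 0, 1: 2, 2: 4, 3: 7, 4: 9, 5: 11}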
| 276
| 1
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# TODO: upload to AWS
SCREAMING_SNAKE_CASE__ = {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
),
}
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 'retribert'
def __init__( self , lowercase=30_522 , lowercase=768 , lowercase=8 , lowercase=12 , lowercase=3_072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=2 , lowercase=0.02 , lowercase=1e-12 , lowercase=True , lowercase=128 , lowercase=0 , **lowercase , ) -> str:
super().__init__(pad_token_id=lowercase , **lowercase )
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = share_encoders
lowerCAmelCase = projection_dim
| 46
|
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operators.

    >>> add(3, 5)
    8
    """
    while second != 0:
        # `carry` collects the bit positions that overflow out of the XOR sum
        carry = first & second
        first ^= second
        second = carry << 1
    return first
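# Worked trace (ours) of add(3, 5), with 3 = 0b011 and 5 = 0b101:
#   iteration 1: carry = 0b001, first = 0b110, second = 0b010
#   iteration 2: carry = 0b010, first = 0b100, second = 0b100
#   iteration 3: carry = 0b100, first = 0b000, second = 0b1000
#   iteration 4: carry = 0b000, first = 0b1000 (= 8), second = 0 -> loop exits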
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase: Optional[int] = int(input('Enter the first number: ').strip())
lowerCAmelCase: Union[str, Any] = int(input('Enter the second number: ').strip())
print(F"{add(first, second) = }")
| 297
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 337
|
import argparse
from copy import deepcopy

import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load

from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    set_seed,
)


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    """Evaluate on the training set as well whenever an evaluation is scheduled."""

    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy


def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()


if __name__ == "__main__":
    main()
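# --- Illustrative aside (ours, assumes the `datasets` library) ---------------
# What `labels.str2int` does in `tokenize` above: a ClassLabel maps class
# names to integer ids (and back) for training.
from datasets import ClassLabel

complexity = ClassLabel(names=["constant", "linear", "quadratic"])
assert complexity.str2int("linear") == 1
assert complexity.int2str(2) == "quadratic"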
| 337
| 1
|
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
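# --- Illustrative aside (ours, dependency-free) ------------------------------
# What `BertOnnxConfig.inputs` produces: each input name maps tensor
# dimensions to symbolic axis names so the ONNX export can mark them dynamic.
from collections import OrderedDict

def dynamic_axes(task: str) -> OrderedDict:
    axis = {0: "batch", 1: "choice", 2: "sequence"} if task == "multiple-choice" else {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", axis), ("attention_mask", axis), ("token_type_ids", axis)])

assert dynamic_axes("default")["input_ids"] == {0: "batch", 1: "sequence"}
assert dynamic_axes("multiple-choice")["input_ids"][1] == "choice"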
| 63
|
def circle_sort(collection: list) -> list:
    """Sort a list in place with the circle sort algorithm and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """Compare/swap mirrored pairs in [low, high]; return True if any swap happened."""
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
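# Quick self-check (ours): circle sort agrees with the built-in sort.
if __name__ == "__main__":
    import random

    data = [random.randint(-100, 100) for _ in range(50)]
    assert circle_sort(list(data)) == sorted(data)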
| 39
| 0
|
import numpy as np
import datasets
_DESCRIPTION = '\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'

_CITATION = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'

_KWARGS_DESCRIPTION = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalanobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
| 145
|
def combination_util(arr, n, r, index, data, i):
    """Recursively fill `data` with every combination of size r and print each one."""
    if index == r:  # the combination is complete
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combinations one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
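# Cross-check (ours): the recursion enumerates the same C(5, 3) = 10 subsets
# as itertools.combinations, in the same lexicographic order.
from itertools import combinations

assert len(list(combinations([10, 20, 30, 40, 50], 3))) == 10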
| 145
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
|
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def __lowercase ( _a="" ):
snake_case_ : List[str] = tempfile.mkdtemp()
return os.path.join(_a , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)

        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))


class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
| 264
| 0
|
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes the layout of a fused QKV tensor so it matches what later
    # Megatron-LM versions (and transformers GPT-2) expect.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
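# --- Illustrative aside (ours) ------------------------------------------------
# Quick shape sanity-check for `fix_query_key_value_ordering` with
# checkpoint_version >= 2.0: the head/split permutation reshuffles rows but
# preserves the overall [num_splits * num_heads * hidden_size, :] shape.
_qkv = torch.randn(2 * 3 * 4, 7)  # num_heads=2, num_splits=3, hidden_size=4
_out = fix_query_key_value_ordering(_qkv, checkpoint_version=2.0, num_splits=3, num_heads=2, hidden_size=4)
assert _out.shape == _qkv.shape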
def convert_megatron_checkpoint(args, input_state_dict, config):
    """Convert a Megatron-LM GPT-2 state dict into a transformers GPT-2 state dict."""
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))

        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
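

# Illustrative sketch, not part of the original script: how the layer-name regex and the
# megatron_to_transformers map above cooperate. The Megatron parameter key used here is
# hypothetical, and the one-entry map just mirrors the real one for the demo.
def _demo_layer_name_mapping():
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
    m = layer_re.match("layers.7.mlp.dense_h_to_4h.weight")
    layer_idx, op_name, weight_or_bias = int(m.group(1)), m.group(2), m.group(3)
    megatron_to_transformers = {"mlp.dense_h_to_4h": ".mlp.c_fc."}
    # -> "transformer.h.7.mlp.c_fc.weight"
    return f"transformer.h.{layer_idx}" + megatron_to_transformers[op_name] + weight_or_bias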
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 72
|
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
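

# Minimal usage sketch, not part of the original module: instantiating the config and
# overriding a couple of fields (the override values here are arbitrary).
def _example_efficientnet_config():
    config = EfficientNetConfig(image_size=300, width_coefficient=1.0)
    # With the default num_block_repeats, num_hidden_layers == sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64
    return config.num_hidden_layers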
| 72
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
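

# Note (illustrative, not part of the original module): the _import_structure/_LazyModule
# pattern above defers heavy imports (torch, sentencepiece) until an attribute is first
# accessed, so `import transformers` stays cheap. PEP 562's module-level __getattr__ is
# the underlying mechanism; a minimal standalone sketch of the same idea:
def _lazy_getattr_sketch(name):
    # Hypothetical resolver: import one symbol on first access instead of at import time.
    if name == "SpeechT5Processor":
        from .processing_speecht5 import SpeechT5Processor

        return SpeechT5Processor
    raise AttributeError(name)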
| 276
|
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
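

# Usage sketch (illustrative, not one of the tests above): the reader/writer round trip
# the tests exercise, with hypothetical paths and table name.
def _example_sql_round_trip(sqlite_path, output_sqlite_path, cache_dir):
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()
    return dataset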
| 276
| 1
|
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(lambda x: round(x))

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch
        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []
    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
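

# Worked example (illustrative): a round trip with a known-good 2x2 key. The key
# [[2, 5], [1, 6]] has determinant 7, which is coprime with 36, so it is accepted.
# Note that decryption returns the padded, uppercased plaintext, not the raw input.
def _example_hill_cipher():
    hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
    ciphertext = hc.encrypt("testing hill cipher")
    return ciphertext, hc.decrypt(ciphertext)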
| 99
|
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 99
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 337
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
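# Usage sketch (illustrative; the script name and all paths below are hypothetical):
#
# python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#     --longformer_model longformer-base-4096 \
#     --longformer_question_answering_ckpt_path /path/to/checkpoint.ckpt \
#     --pytorch_dump_folder_path /path/to/output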
| 337
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 368
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" BERT tokenizer backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
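

# Worked example (illustrative): token type ids for a sentence pair. Segment A
# ([CLS] + token_ids_0 + [SEP]) is labeled 0; segment B (token_ids_1 + [SEP]) is labeled 1.
# The ids passed in are arbitrary and the call needs the pretrained files to be available.
def _example_token_type_ids():
    tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
    return tokenizer.create_token_type_ids_from_sequences([11, 12], [21, 22, 23])
    # -> [0, 0, 0, 0, 1, 1, 1, 1]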
| 117
| 0
|
'''simple docstring'''
import argparse
import struct
import unittest
class SHA256:
    """Compute the SHA-256 hash of a given byte string."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6a09_e667,
0xbb67_ae85,
0x3c6e_f372,
0xa54f_f53a,
0x510e_527f,
0x9b05_688c,
0x1f83_d9ab,
0x5be0_cd19,
]
        # Initialize round constants
        self.round_constants = [
0x428a_2f98,
0x7137_4491,
0xb5c0_fbcf,
0xe9b5_dba5,
0x3956_c25b,
0x59f1_11f1,
0x923f_82a4,
0xab1c_5ed5,
0xd807_aa98,
0x1283_5b01,
0x2431_85be,
0x550c_7dc3,
0x72be_5d74,
0x80de_b1fe,
0x9bdc_06a7,
0xc19b_f174,
0xe49b_69c1,
0xefbe_4786,
0x0fc1_9dc6,
0x240c_a1cc,
0x2de9_2c6f,
0x4a74_84aa,
0x5cb0_a9dc,
0x76f9_88da,
0x983e_5152,
0xa831_c66d,
0xb003_27c8,
0xbf59_7fc7,
0xc6e0_0bf3,
0xd5a7_9147,
0x06ca_6351,
0x1429_2967,
0x27b7_0a85,
0x2e1b_2138,
0x4d2c_6dfc,
0x5338_0d13,
0x650a_7354,
0x766a_0abb,
0x81c2_c92e,
0x9272_2c85,
0xa2bf_e8a1,
0xa81a_664b,
0xc24b_8b70,
0xc76c_51a3,
0xd192_e819,
0xd699_0624,
0xf40e_3585,
0x106a_a070,
0x19a4_c116,
0x1e37_6c08,
0x2748_774c,
0x34b0_bcb5,
0x391c_0cb3,
0x4ed8_aa4a,
0x5b9c_ca4f,
0x682e_6ff3,
0x748f_82ee,
0x78a5_636f,
0x84c8_7814,
0x8cc7_0208,
0x90be_fffa,
0xa450_6ceb,
0xbef9_a3f7,
0xc671_78f2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad the data: a 0x80 byte, zeros, then the message bit length as a big-endian 64-bit integer."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        """Run the SHA-256 compression function over each 64-byte block."""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer by the given number of bit positions."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    """Check the pure-Python implementation against hashlib."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()

    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
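

# Quick check (illustrative): the class agrees with hashlib on the empty message as well.
def _example_sha256_empty():
    import hashlib

    digest = SHA256(b"").hash
    assert digest == hashlib.sha256(b"").hexdigest()
    return digest  # 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'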
| 145
|
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 145
| 1
|
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated resources per resource column."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available: claim vector minus what is allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process remaining need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        """Run the banker's algorithm safety check, printing the execution order."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
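

# Usage sketch (illustrative): running the safety check on the module-level test tables,
# with describe=True so the resource tables are printed first.
def _example_bankers_algorithm():
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)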
| 367
|
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
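

# Quick check (illustrative): F(12) = 144 is the first Fibonacci term with three digits.
def _example_solution():
    assert solution(3) == 12
    return solution(3)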
| 94
| 0
|
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
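    # Example invocation (illustrative only; the script name, checkpoint,
    # and output path are placeholders, not taken from this file):
    #
    #   python convert_slow_tokenizers_checkpoints_to_fast.py \
    #       --tokenizer_name BertTokenizer \
    #       --checkpoint_name bert-base-uncased \
    #       --dump_path ./fast_tokenizers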
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
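# Quick sanity check (illustrative, uses the config defined above): with the
# default conv_stride of (5, 2, 2, 2, 2, 2, 2), the feature encoder
# downsamples the raw waveform by 5 * 2**6 = 320 samples per output frame.
if __name__ == "__main__":
    config = UniSpeechConfig()
    assert config.inputs_to_logits_ratio == 320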
"""simple docstring"""
from __future__ import annotations
def __lowercase ( _a , _a ):
if nth_term == "":
return [""]
snake_case_ : List[str] = int(_a )
snake_case_ : str = int(_a )
snake_case_ : list[str] = []
for temp in range(int(_a ) ):
series.append(f"1 / {pow(temp + 1 , int(_a ) )}" if series else '''1''' )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ : int = int(input('''Enter the last number (nth term) of the P-Series'''))
lowercase__ : List[Any] = int(input('''Enter the power for P-Series'''))
print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
print(p_series(nth_term, power))
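    # Worked check (added for illustration): for p = 2 the series converges
    # to pi**2 / 6 ~= 1.6449; the first 1000 terms already sum to ~1.6439.
    partial_sum = sum(1 / (k + 1) ** 2 for k in range(1000))
    print(f"Sum of first 1000 terms for p=2: {partial_sum:.4f}")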
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )


@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    """Return the fixed monthly payment for a loan (EMI formula):
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)
    where p is the principal, r the monthly rate, and n the number of payments.
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
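    # Worked example (added for illustration, uses the function above):
    # 25,000 borrowed at 12% per annum over 3 years gives a monthly rate of
    # 0.01 and 36 payments, i.e. roughly 830.36 per month.
    print(f"EMI: {equated_monthly_installments(25000, 0.12, 3):.2f}")  # ~830.36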
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    return np.dot(vector, vector)


class SVC:
    """Support vector classifier trained by solving Wolfe's dual problem."""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        """Linear kernel: a plain dot product."""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        """RBF kernel: exp(-gamma * ||v1 - v2||^2)."""
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
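    # Minimal usage sketch (added for illustration): two linearly separable
    # points with labels +1 / -1; a point further along the +1 direction
    # should be classified as +1.
    xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, -1.0])]
    ys = np.asarray([1, -1])
    model = SVC(kernel="linear")
    model.fit(xs, ys)
    print(model.predict(np.asarray([0.0, 2.0])))  # expected: 1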
"""Tests for the Funnel tokenizers."""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
"""Tokenization class for SpeechT5."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'microsoft/speecht5_asr': 1_024,
'microsoft/speecht5_tts': 1_024,
'microsoft/speecht5_vc': 1_024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    """Character-level SpeechT5 tokenizer backed by a SentencePiece model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
"""Factorial with memoization via functools.lru_cache."""
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """
    >>> factorial(7)
    5040
    """
    if num < 0:
        raise ValueError("Number should not be negative.")

    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
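    # Quick check (added for illustration): thanks to lru_cache, computing
    # factorial(6) reuses the cached factorial(5) instead of recursing again.
    print(factorial(5))  # 120
    print(factorial(6))  # 720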
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]",
        pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True,
        strip_accents=None, **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
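# Minimal usage sketch for the fast tokenizer above (illustrative; the
# checkpoint downloads on first use, and the ids shown assume
# bert-base-uncased's vocabulary):
#
#   tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   print(tok("Hello world")["input_ids"])  # [101, 7592, 2088, 102]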
"""simple docstring"""
def UpperCamelCase_ ( lowerCAmelCase__ : str ) -> bool:
"""simple docstring"""
return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') )
def UpperCamelCase_ ( lowerCAmelCase__ : str ) -> bool:
"""simple docstring"""
lowerCAmelCase_ : Optional[Any] = credit_card_number
lowerCAmelCase_ : str = 0
lowerCAmelCase_ : List[Any] = len(lowerCAmelCase__ ) - 2
for i in range(lowerCAmelCase__ , -1 , -2 ):
# double the value of every second digit
lowerCAmelCase_ : Any = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
lowerCAmelCase_ : Optional[int] = cc_number[:i] + str(lowerCAmelCase__ ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(lowerCAmelCase__ ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def UpperCamelCase_ ( lowerCAmelCase__ : str ) -> bool:
"""simple docstring"""
lowerCAmelCase_ : Union[str, Any] = f"{credit_card_number} is an invalid credit card number because"
if not credit_card_number.isdigit():
print(f"{error_message} it has nonnumerical characters." )
return False
if not 13 <= len(lowerCAmelCase__ ) <= 16:
print(f"{error_message} of its length." )
return False
if not validate_initial_digits(lowerCAmelCase__ ):
print(f"{error_message} of its first two digits." )
return False
if not luhn_validation(lowerCAmelCase__ ):
print(f"{error_message} it fails the Luhn check." )
return False
print(f"{credit_card_number} is a valid credit card number." )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
"""simple docstring"""
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
"""
lowercase__ : Tuple = """\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""
lowercase__ : List[Any] = """
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
'scores': List of scores.
Examples:
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> bleurt = datasets.load_metric(\"bleurt\")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results[\"scores\"]])
[1.03, 1.04]
"""
CHECKPOINT_URLS = {
"""bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""",
"""bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""",
"""bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""",
"""bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""",
"""bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""",
"""bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""",
"""BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""",
"""BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""",
"""BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""",
"""BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class BLEURT(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
"""Assorted helper utilities: model unwrapping, saving, and environment patching."""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers (DDP, DataParallel, DeepSpeed, compiled wrappers)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model


def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it first."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk: via xm.save on TPU, otherwise only from the local main process."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set upper-cased environment variables inside the context, removing them afterwards."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Get a pretty name for a function, class, or object instance."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge two dictionaries, with ``source`` taking priority over ``destination``."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Check whether a local port is already in use (e.g. by a still-running previous launch)."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
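# Usage sketch for patch_environment (defined above, added for illustration):
# the variable exists only inside the with-block and is removed afterwards.
if __name__ == "__main__":
    with patch_environment(my_flag="1"):
        assert os.environ["MY_FLAG"] == "1"
    assert "MY_FLAG" not in os.environ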
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional-knapsack solver: take whole items in decreasing
    profit/weight order, then a fraction of the last item if needed."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
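    # Worked example (added for illustration): with profits [10, 9, 8],
    # weights [2, 3, 4] and max_weight 5, the greedy takes item 0 (ratio 5.0)
    # and item 1 (ratio 3.0), filling the sack exactly: gain = 10 + 9.
    print(calc_profit([10, 9, 8], [2, 3, 4], 5))  # 19.0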
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a peaking (bell) biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
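# Usage sketch (added for illustration; assumes IIRFilter exposes a
# process(sample) method as in the audio_filters package): run a 1 kHz
# low-pass over a short step signal and print the smoothed output.
if __name__ == "__main__":
    flt = make_lowpass(1000, 48000)
    print([round(flt.process(x), 4) for x in (0.0, 1.0, 1.0, 1.0)])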
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048,
        encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
        activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1,
        eos_token_id=2, forced_eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**common_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
__magic_name__ : List[Any] = super()._flatten_past_key_values_(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
__magic_name__ : Tuple = super(lowerCAmelCase__ , self )._flatten_past_key_values_(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
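# A hedged usage sketch for the ONNX config above. The checkpoint name and the
# `BartOnnxConfig` entry point are assumptions (this pattern ships for BART-style
# models in `transformers`); the point is that `generate_dummy_inputs` dispatches
# to the right dummy-input builder for the configured export task.
#
# from transformers import AutoTokenizer, BartConfig
# from transformers.models.bart.configuration_bart import BartOnnxConfig
#
# tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
# onnx_config = BartOnnxConfig(BartConfig(), task="seq2seq-lm")
# dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework="pt")
# print(sorted(dummy))  # ['attention_mask', 'decoder_attention_mask', 'decoder_input_ids', 'input_ids']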
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def __lowercase ( self : str ):
lowerCAmelCase = TFXLMRobertaModel.from_pretrained("""jplu/tf-xlm-roberta-base""" )
lowerCAmelCase = {
"""input_ids""": tf.convert_to_tensor([[0, 2646, 1_0269, 83, 9_9942, 2]] , dtype=tf.intaa ), # "My dog is cute"
"""attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
lowerCAmelCase = model(lowerCAmelCase )["""last_hidden_state"""]
lowerCAmelCase = tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape , lowerCAmelCase )
# compare the actual values for a slice.
lowerCAmelCase = tf.convert_to_tensor(
[
[
[0.068_1762, 0.1089_4451, 0.0677_2504],
[-0.0642_3668, 0.0236_6615, 0.0432_9344],
[-0.0605_7295, 0.0997_4135, -0.0007_0584],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowercase ( self : Optional[Any] ):
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = BlipImageProcessor()
lowerCAmelCase = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
lowerCAmelCase = BlipProcessor(lowerCAmelCase , lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def __lowercase ( self : Optional[Any] , **lowerCAmelCase : Tuple ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase ).tokenizer
def __lowercase ( self : List[Any] , **lowerCAmelCase : Optional[Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase ).image_processor
def __lowercase ( self : Dict ):
shutil.rmtree(self.tmpdirname )
def __lowercase ( self : str ):
lowerCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase = [Image.fromarray(np.moveaxis(lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowercase ( self : List[str] ):
lowerCAmelCase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCAmelCase = self.get_image_processor(do_normalize=lowerCAmelCase , padding_value=1.0 )
lowerCAmelCase = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase )
def __lowercase ( self : Optional[int] ):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = BlipProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = image_processor(lowerCAmelCase , return_tensors="""np""" )
lowerCAmelCase = processor(images=lowerCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self : Tuple ):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = BlipProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
lowerCAmelCase = """lower newer"""
lowerCAmelCase = processor(text=lowerCAmelCase )
lowerCAmelCase = tokenizer(lowerCAmelCase , return_token_type_ids=lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase ( self : Union[str, Any] ):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = BlipProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
lowerCAmelCase = """lower newer"""
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=lowerCAmelCase , images=lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase ):
processor()
def __lowercase ( self : List[Any] ):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = BlipProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase = processor.batch_decode(lowerCAmelCase )
lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def __lowercase ( self : Optional[int] ):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = BlipProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
lowerCAmelCase = """lower newer"""
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=lowerCAmelCase , images=lowerCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
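# Hypothetical end-to-end sketch of the processor under test (the checkpoint name is
# an assumption): one BlipProcessor call tokenizes the text and converts the image
# into exactly the model-ready tensors checked for above.
#
# from PIL import Image
# from transformers import BlipProcessor
#
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# batch = processor(images=Image.new("RGB", (224, 224)), text="a photo of", return_tensors="pt")
# print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']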
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config

logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"

def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    """Shift input ids one token to the right, prepending the decoder start token."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids

class FlaxMT5Model(FlaxT5Model):
    """Overrides FlaxT5Model to use the mT5 configuration."""

    model_type = "mt5"
    config_class = MT5Config

class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    """Overrides FlaxT5EncoderModel to use the mT5 configuration."""

    model_type = "mt5"
    config_class = MT5Config

class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    """Overrides FlaxT5ForConditionalGeneration to use the mT5 configuration."""

    model_type = "mt5"
    config_class = MT5Config
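# Minimal illustration of `shift_tokens_right` (a sketch with made-up values, not
# part of the original module): labels are shifted one step right, the decoder start
# token is prepended, and -100 label padding is mapped back to pad_token_id.
#
# import jax.numpy as jnp
# labels = jnp.array([[5, 6, -100, -100]])
# shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=2)
# # -> [[2, 5, 6, 0]]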
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version

if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm

def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)

def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    """Extract a model from its distributed containers (DDP, DataParallel, DeepSpeed, torch.compile)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model

def wait_for_everyone():
    """Introduces a blocking point in the script, making sure all processes have reached it first."""
    PartialState().wait_for_everyone()

def save(obj, f):
    """Save the data to disk; safe to use in place of `torch.save()` under TPU or multi-process runs."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)

@contextmanager
def patch_environment(**kwargs):
    """A context manager that adds each keyword argument to `os.environ` (upper-cased) and removes it on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]

def get_pretty_name(obj):
    """Gets a pretty, human-readable name from `obj`."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)

def merge_dicts(source, destination):
    """Recursively merges `source` into `destination`, returning `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination

def is_port_in_use(port=None):
    """Checks whether a port is already in use on `localhost` (default: 29500)."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
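# Quick demo of the two pure-Python helpers above (comment sketch; this module uses
# relative imports, so it is not runnable as a standalone script):
#
# with patch_environment(master_port="29501"):
#     assert os.environ["MASTER_PORT"] == "29501"      # set and upper-cased inside the block
# assert "MASTER_PORT" not in os.environ               # removed again on exit (assuming it was unset before)
#
# merge_dicts({"opt": {"betas": (0.9, 0.999)}}, {"opt": {"lr": 1e-3}})
# # -> {'opt': {'lr': 0.001, 'betas': (0.9, 0.999)}}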
from __future__ import annotations

import collections
import pprint
from pathlib import Path

def signature(word: str) -> str:
    """Return a sorted-letter signature; anagrams share the same signature."""
    return "".join(sorted(word))

def anagram(my_word: str) -> list[str]:
    """Return every word in the word list that is an anagram of my_word."""
    return word_by_signature[signature(my_word)]

data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}

    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
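# Worked example (comment sketch; the real lookup table is built from words.txt at
# import time): "dog" and "god" both have signature "dgo", so they land in the same
# word_by_signature bucket and anagram("dog") would return ["dog", "god"].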
import os
import sys

SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)

dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]

@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)

@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)

@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)

@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)

@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)

@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)

@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
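# Usage sketch (assuming this file is the repo-root `hubconf.py` that torch.hub
# discovers; torch.hub resolves entry points by the function names defined above):
#
# import torch
# tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
# mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")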
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json

def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params

logger = logging.getLogger(__name__)

def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback

def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )

class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
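# Hypothetical wiring sketch (the LightningModule and output_dir names are
# placeholders, not part of this file): how the callbacks above typically combine.
#
# checkpoint = get_checkpoint_callback(output_dir="outputs", metric="bleu")
# early_stop = get_early_stopping_callback(metric="bleu", patience=3)
# trainer = pl.Trainer(callbacks=[Seq2SeqLoggingCallback(), checkpoint, early_stop])
# trainer.fit(my_lightning_module)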
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu

class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)

@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
import heapq
import sys
import numpy as np
UpperCAmelCase__ = tuple[int, int]
class a :
def __init__( self : Any ):
_UpperCAmelCase = []
_UpperCAmelCase = set()
def lowerCAmelCase_ ( self : List[str] ):
if not self.empty():
return self.elements[0][0]
else:
return float("""inf""" )
def lowerCAmelCase_ ( self : Union[str, Any] ):
return len(self.elements ) == 0
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ):
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(__lowerCAmelCase )
else:
# update
# print("update", item)
_UpperCAmelCase = []
((_UpperCAmelCase) , (_UpperCAmelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((_UpperCAmelCase) , (_UpperCAmelCase)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[Any] ):
if item in self.set:
self.set.remove(__lowerCAmelCase )
_UpperCAmelCase = []
((_UpperCAmelCase) , (_UpperCAmelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((_UpperCAmelCase) , (_UpperCAmelCase)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def lowerCAmelCase_ ( self : Optional[Any] ):
return self.elements[0][1]
def lowerCAmelCase_ ( self : Dict ):
((_UpperCAmelCase) , (_UpperCAmelCase)) = heapq.heappop(self.elements )
self.set.remove(__lowerCAmelCase )
return (priority, item)
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
# euclidean distance
_UpperCAmelCase = np.array(lowercase )
_UpperCAmelCase = np.array(lowercase )
return np.linalg.norm(a - b )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
# integer division by time variable
return consistent_heuristic(lowercase ,lowercase ) // t
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = g_function[start] + Wa * heuristics[i](lowercase ,lowercase )
return ans
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = np.chararray((n, n) )
for i in range(lowercase ):
for j in range(lowercase ):
_UpperCAmelCase = """*"""
for i in range(lowercase ):
for j in range(lowercase ):
if (j, (n - 1) - i) in blocks:
_UpperCAmelCase = """#"""
_UpperCAmelCase = """-"""
_UpperCAmelCase = back_pointer[goal]
while x != start:
((_UpperCAmelCase) , (_UpperCAmelCase)) = x
# print(x)
_UpperCAmelCase = """-"""
_UpperCAmelCase = back_pointer[x]
_UpperCAmelCase = """-"""
for i in range(lowercase ):
for j in range(lowercase ):
if (i, j) == (0, n - 1):
print(grid[i][j] ,end=""" """ )
print("""<-- End position""" ,end=""" """ )
else:
print(grid[i][j] ,end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
_UpperCAmelCase = back_pointer[goal]
while x != start:
print(lowercase ,end=""" """ )
_UpperCAmelCase = back_pointer[x]
print(lowercase )
sys.exit()
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,):
"""simple docstring"""
for itera in range(lowercase ):
open_list[itera].remove_element(lowercase )
# print("s", s)
# print("j", j)
((_UpperCAmelCase) , (_UpperCAmelCase)) = s
_UpperCAmelCase = (x - 1, y)
_UpperCAmelCase = (x + 1, y)
_UpperCAmelCase = (x, y + 1)
_UpperCAmelCase = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(lowercase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(lowercase )
_UpperCAmelCase = -1
_UpperCAmelCase = float("""inf""" )
if valid(lowercase ) and g_function[neighbours] > g_function[s] + 1:
_UpperCAmelCase = g_function[s] + 1
_UpperCAmelCase = s
if neighbours not in close_list_anchor:
open_list[0].put(lowercase ,key(lowercase ,0 ,lowercase ,lowercase ) )
if neighbours not in close_list_inad:
for var in range(1 ,lowercase ):
if key(lowercase ,lowercase ,lowercase ,lowercase ) <= Wa * key(
lowercase ,0 ,lowercase ,lowercase ):
open_list[j].put(
lowercase ,key(lowercase ,lowercase ,lowercase ,lowercase ) )
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = []
for x in range(1 ,5 ):
for y in range(1 ,6 ):
some_list.append((x, y) )
for x in range(15 ,20 ):
some_list.append((x, 17) )
for x in range(10 ,19 ):
for y in range(1 ,15 ):
some_list.append((x, y) )
# L block
for x in range(1 ,4 ):
for y in range(12 ,19 ):
some_list.append((x, y) )
for x in range(3 ,13 ):
for y in range(16 ,19 ):
some_list.append((x, y) )
return some_list
UpperCAmelCase__ = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
UpperCAmelCase__ = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(1_0, 1),
(1_1, 1),
(1_2, 1),
(1_3, 1),
(1_4, 1),
(1_5, 1),
(1_6, 1),
(1_7, 1),
(1_8, 1),
(1_9, 1),
]
UpperCAmelCase__ = make_common_ground()
UpperCAmelCase__ = blocks_blk
# hyper parameters
UpperCAmelCase__ = 1
UpperCAmelCase__ = 1
UpperCAmelCase__ = 2_0
UpperCAmelCase__ = 3 # one consistent and two other inconsistent
# start and end destination
UpperCAmelCase__ = (0, 0)
UpperCAmelCase__ = (n - 1, n - 1)
UpperCAmelCase__ = 1
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = {start: 0, goal: float("""inf""" )}
_UpperCAmelCase = {start: -1, goal: -1}
_UpperCAmelCase = []
_UpperCAmelCase = set()
for i in range(lowercase ):
open_list.append(PriorityQueue() )
open_list[i].put(lowercase ,key(lowercase ,lowercase ,lowercase ,lowercase ) )
_UpperCAmelCase = []
_UpperCAmelCase = []
while open_list[0].minkey() < float("""inf""" ):
for i in range(1 ,lowercase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowercase ,lowercase ,lowercase )
else:
_UpperCAmelCase , _UpperCAmelCase = open_list[i].top_show()
visited.add(lowercase )
expand_state(
lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,)
close_list_inad.append(lowercase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowercase ,lowercase ,lowercase )
else:
_UpperCAmelCase = open_list[0].top_show()
visited.add(lowercase )
expand_state(
lowercase ,0 ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,)
close_list_anchor.append(lowercase )
print("""No path found to goal""" )
print()
for i in range(n - 1 ,-1 ,-1 ):
for j in range(lowercase ):
if (j, i) in blocks:
print("""#""" ,end=""" """ )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("""*""" ,end=""" """ )
else:
print("""-""" ,end=""" """ )
else:
print("""*""" ,end=""" """ )
if (j, i) == (n - 1, n - 1):
print("""<-- End position""" ,end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
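# Sanity check for the weighted key above (comment sketch): with W1 == 1 and
# g_function[s] == 0, key(s, 0, goal, g_function) is just the euclidean distance
# to the goal, e.g. key((0, 0), 0, (19, 19), {(0, 0): 0}) ≈ 19 * sqrt(2) ≈ 26.87.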
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : int = StableUnCLIPPipeline
_snake_case : str = TEXT_TO_IMAGE_PARAMS
_snake_case : Any = TEXT_TO_IMAGE_BATCH_PARAMS
_snake_case : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_snake_case : str = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_snake_case : str = False
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = 32
_UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=__lowerCAmelCase , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__lowerCAmelCase , num_layers=1 , )
torch.manual_seed(0 )
_UpperCAmelCase = DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowerCAmelCase , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
_UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=__lowerCAmelCase )
_UpperCAmelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowerCAmelCase , layers_per_block=1 , upcast_attention=__lowerCAmelCase , use_linear_projection=__lowerCAmelCase , )
torch.manual_seed(0 )
_UpperCAmelCase = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL()
_UpperCAmelCase = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str=0 ):
if str(__lowerCAmelCase ).startswith("""mps""" ):
_UpperCAmelCase = torch.manual_seed(__lowerCAmelCase )
else:
_UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_UpperCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=__lowerCAmelCase )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def lowerCAmelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
_UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
_UpperCAmelCase = pipe("""anime turle""" , generator=__lowerCAmelCase , output_type="""np""" )
_UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
_UpperCAmelCase = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase = pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
_UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
# Usage:
# ./gen-card-facebook-wmt19.py

import os
from pathlib import Path

def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation.
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)

# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)

_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def catalan_numbers(upper_limit: int) -> "list[int]":
    """
    Return a list of the Catalan number sequence from C(0) through C(upper_limit),
    computed with dynamic programming.
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list

if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
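# Cross-check (comment sketch): the DP values agree with the closed form
# C(n) = comb(2n, n) // (n + 1); for example catalan_numbers(5) == [1, 1, 2, 5, 14, 42]
# and math.comb(10, 5) // 6 == 42.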
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    """Count the prize strings that can still be formed from this state."""
    # if we are absent twice, or late three consecutive days,
    # no prize strings are possible from this point on
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    """Return the number of prize strings for the given number of days (Project Euler 191)."""
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
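
# Quick sanity check: Project Euler 191 states there are exactly 43 prize
# strings over a 4-day period, which the memoised recursion above reproduces.
assert solution(4) == 43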
| 138
| 0
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be downloaded even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
| 261
|
def interpolation_search(sorted_collection, item):
    """Search `item` in an ascending `sorted_collection`; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid division by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        elif point < left:
            right = left
            left = point
        elif point > right:
            left = right
            right = point
        else:
            if item < current_item:
                right = point - 1
            else:
                left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant: search `item` within `sorted_collection[left:right + 1]`."""
    # avoid division by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    """Raise ValueError if `collection` is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
import sys
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at position: {result}")
    else:
        print("Not found")
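
# Usage sketch for the two search variants above; uniformly spaced keys are
# the favourable case for interpolation search (expected O(log log n) probes).
data = list(range(0, 1000, 5))
print(interpolation_search(data, 500))  # -> 100
print(interpolation_search_by_recursion(data, 500, 0, len(data) - 1))  # -> 100
print(interpolation_search(data, 7))  # -> None (7 is not in the collection)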
| 261
| 1
|
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token(idx):
    token = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', """stage2.cls_token""") )
return token
def final():
    head = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)


# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
    parser.add_argument(
        '''--cvt_file_name''',
        default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
        type=str,
        help='''Path to the original CvT checkpoint (.pth) file.''',
    )
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
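
# Illustration of the renaming mechanism used above, with made-up keys and
# plain ints standing in for tensors: each (new_key, old_key) pair copies one
# entry from the original checkpoint into the renamed state dict.
from collections import OrderedDict

old_weights = {"stage0.patch_embed.proj.weight": 1, "stage0.patch_embed.proj.bias": 2}
rename_pairs = [
    ("cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight", "stage0.patch_embed.proj.weight"),
    ("cvt.encoder.stages.0.embedding.convolution_embeddings.projection.bias", "stage0.patch_embed.proj.bias"),
]
new_weights = OrderedDict((new_key, old_weights[old_key]) for new_key, old_key in rename_pairs)
print(list(new_weights.keys()))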
| 162
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
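
# The repeated try/except + _import_structure pattern above defers importing
# heavy submodules until one of their names is actually accessed. A minimal
# standalone sketch of the same idea using PEP 562 module-level __getattr__
# (not transformers' actual _LazyModule implementation):
import importlib

_demo_import_structure = {"heavy_module": ["HeavyClass"]}
_attr_to_module = {attr: mod for mod, attrs in _demo_import_structure.items() for attr in attrs}


def __getattr__(name):
    # import the owning submodule only on first attribute access
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")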
| 162
| 1
|
"""simple docstring"""
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    """A heap-based priority queue that also supports updating and removing items."""

    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update the priority of an item already in the queue
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(p: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_2(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    """Print the grid and the path found, then exit."""
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

                if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                    g_function[neighbours] = g_function[s] + 1
                    back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                if neighbours not in close_list_inad:
                    for var in range(1, n_heuristic):
                        if key(neighbours, var, goal, g_function) <= W2 * key(neighbours, 0, goal, g_function):
                            open_list[j].put(neighbours, key(neighbours, var, goal, g_function))
def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk

# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 355
|
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Return True if `number` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield the primes in ascending order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below `n` (Project Euler problem 10)."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(f'{solution() = }')
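
# Cross-check with a sieve of Eratosthenes (standard library only): the sum
# of primes below 100 is 1060, and both approaches must agree on it.
def sum_primes_below_sieve(n: int) -> int:
    is_composite = bytearray(n)
    total = 0
    for p in range(2, n):
        if not is_composite[p]:
            total += p
            for multiple in range(p * p, n, p):
                is_composite[multiple] = 1
    return total


assert sum_primes_below_sieve(100) == solution(100) == 1060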
| 56
| 0
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| 131
|
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
| 131
| 1
|
from math import factorial, radians
def maclaurin_sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(angle_in_degrees) with a truncated Maclaurin series."""
    # Simplify the angle to be between -360 and 360 degrees
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__("doctest").testmod()
| 171
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : str = ["GLPNFeatureExtractor"]
lowercase : Tuple = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNLayer",
"GLPNModel",
"GLPNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 171
| 1
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    """Generate and write the README.md model card for one wmt19 language pair."""
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 334
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    """Read one token per line from `vocab_file`."""
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        # ESM vocabulary entries must never be split, so they are always added as special tokens
        return super()._add_tokens(new_tokens, special_tokens=True)
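
# Hypothetical usage sketch for the tokenizer above; "esm_vocab.txt" is a
# made-up local path containing one token per line (e.g. "<cls>", "<pad>",
# "<eos>", "<unk>", then one line per amino-acid letter).
tokenizer = EsmTokenizer(vocab_file="esm_vocab.txt")
encoded = tokenizer(" ".join("MKTAYIAKQR"))  # _tokenize() splits on whitespace
print(encoded["input_ids"])       # <cls> id, residue ids, <eos> id
print(encoded["attention_mask"])  # all ones for an unpadded single sequence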
| 334
| 1
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    """Download and load the spaghetti-eating test video as a list of frames."""
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
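
# Arithmetic behind the tester's sequence length, with the defaults above:
# each frame is a (10 // 2) x (10 // 2) grid of patches, i.e. 25 patch tokens,
# and 2 frames plus the CLS token give 2 * 25 + 1 = 51 tokens.
image_size, patch_size, num_frames = 10, 2, 2
num_patches_per_frame = (image_size // patch_size) ** 2
assert num_frames * num_patches_per_frame + 1 == 51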
| 173
|
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
# NOTE: these intentionally shadow the built-in map/filter; the function names
# label the timed operations in the results file.
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
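
# `get_duration` comes from a local utils module whose source is not shown
# here; this is a plausible minimal sketch of such a decorator, assuming it
# returns the elapsed wall-clock time of the wrapped call in seconds.
import functools
import time


def get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start

    return wrapper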
| 173
| 1
|
"""simple docstring"""
def _lowerCamelCase( a ):
if length <= 0 or not isinstance(a , a ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(a )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
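    # Quick check: n * (2n - 1) for n = 0..4 gives the first five hexagonal
    # numbers, so the first call above is expected to print [0, 1, 6, 15, 28].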
| 261
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_van"""] = [
        """VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """VanForImageClassification""",
        """VanModel""",
        """VanPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
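# With the `_LazyModule` indirection above, importing this package stays cheap:
# submodules such as `modeling_van` are only imported on first attribute access
# (e.g. touching `VanModel`), so torch is not pulled in for config-only users.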
| 261
| 1
|
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(data_dir, save_dir: str, model_name: str, bs: int = 8, max_source_length: int = 1024, type_path="val", n_obs=None, fp16=False, task="summarization", local_rank=None, num_return_sequences=1, dataset_kwargs: Dict = None, prefix="", **generate_kwargs, ) -> Dict:
    '''simple docstring'''
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend='nccl', rank=local_rank)
    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(F'''rank_{local_rank}_output.json''')
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop('num_beams', model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''')  # if this is wrong, check config.model_type.
    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, 'prefix', '') or ''
    ds = Seq2SeqDataset(
        tokenizer, data_dir, max_source_length, max_target_length=1024, type_path=type_path, n_obs=n_obs, prefix=prefix, **dataset_kwargs, )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch['input_ids'].to(model.device), attention_mask=batch['attention_mask'].to(model.device), num_return_sequences=num_return_sequences, num_beams=num_beams, **generate_kwargs, )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch['ids']
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({'pred': pred, 'id': ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    '''simple docstring'''
    parser = argparse.ArgumentParser(
        epilog='Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate')
    parser.add_argument('--data_dir', type=str, help='like cnn_dm/test.source')
    parser.add_argument(
        '--model_name', type=str, help='like facebook/bart-large-cnn,t5-base, etc.', default='sshleifer/distilbart-xsum-12-3', )
    parser.add_argument('--save_dir', type=str, help='where to save', default='tmp_gen')
    parser.add_argument('--max_source_length', type=int, default=None)
    parser.add_argument(
        '--type_path', type=str, default='test', help='which subset to evaluate typically train/val/test')
    parser.add_argument('--task', type=str, default='summarization', help='used for task_specific_params + metrics')
    parser.add_argument('--bs', type=int, default=8, required=False, help='batch size')
    parser.add_argument(
        '--local_rank', type=int, default=-1, required=False, help='should be passed by distributed.launch')
    parser.add_argument(
        '--n_obs', type=int, default=None, required=False, help='How many observations. Defaults to all.')
    parser.add_argument(
        '--num_return_sequences', type=int, default=1, required=False, help='How many sequences to return')
    parser.add_argument(
        '--sync_timeout', type=int, default=600, required=False, help='How long should master process wait for other processes to finish.', )
    parser.add_argument('--src_lang', type=str, default=None, required=False)
    parser.add_argument('--tgt_lang', type=str, default=None, required=False)
    parser.add_argument(
        '--prefix', type=str, required=False, default=None, help='will be added to the beginning of src examples')
    parser.add_argument('--fp16', action='store_true')
    parser.add_argument('--debug', action='store_true')
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(F'''parsed the following generate kwargs: {generate_kwargs}''')
    json_save_dir = Path(args.save_dir + '_tmp')
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob('rank_*.json'))
    if intermediate_files:
        raise ValueError(F'''Found files at {json_save_dir} please move or remove them.''')
        # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs['src_lang'] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs['tgt_lang'] = args.tgt_lang
    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir, json_save_dir, args.model_name, type_path=args.type_path, bs=args.bs, fp16=args.fp16, task=args.task, local_rank=args.local_rank, n_obs=args.n_obs, max_source_length=args.max_source_length, num_return_sequences=args.num_return_sequences, prefix=args.prefix, dataset_kwargs=dataset_kwargs, **generate_kwargs, )
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath('pseudolabel_results.json')
            print(F'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''')
            save_json(results, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + '.target')
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]
        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = 'translation' in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = 'bleu' if calc_bleu else 'rouge'
        metrics = score_fn(preds, labels)
        metrics['n_obs'] = len(preds)
        runtime = time.time() - start_time
        metrics['seconds_per_sample'] = round(runtime / metrics['n_obs'], 4)
        metrics['n_gpus'] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(F'''{args.type_path}_{metric_name}.json''')
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(F'''{args.type_path}_generations.txt'''))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(F'''{args.type_path}.target'''))
        else:
            shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    '''simple docstring'''
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x['pred'] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    '''simple docstring'''
    start_wait = time.time()
    logger.info('waiting for all nodes to finish')
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob('rank_*.json'))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError('Rank 0 gave up on waiting for other processes')
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
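    # Example launch on 2 GPUs (model name and paths are placeholders):
    #   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
    #       --model_name sshleifer/distilbart-xsum-12-3 --data_dir xsum \
    #       --save_dir dbart_xsum_generations --bs 16 --fp16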
| 200
|
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
        model = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset('nielsr/rvlcdip-demo')

        image = dataset['train'][0]['image'].convert('RGB')

        inputs = image_processor(image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.41_58, -0.40_92, -0.43_47], device=torch_device, dtype=torch.float, )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 200
| 1
|
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=1_6000, return_attention_mask=True, do_normalize=True, ) -> Any:
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="""np""").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="""np""").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="""np""").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="""np""").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="""np""").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="""np""").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["""longest""", """max_length""", """do_not_pad"""]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="""np""")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["""longest""", """max_length""", """do_not_pad"""]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="""max_length""", return_tensors="""np""")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="""longest""", return_tensors="""np""")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="""longest""", return_tensors="""np""")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"""input_values""": inputs}], return_tensors="""np""")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"""input_values""": inputs}], return_tensors="""pt""")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == """layer""")
| 26
|
'''simple docstring'''
from collections import defaultdict
def dfs(start: int) -> int:
    '''simple docstring'''
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    '''simple docstring'''
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
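    # For the hard-coded tree above, dfs(1) records every vertex whose subtree
    # has an even number of nodes (here 3, 6 and the root 1); each such
    # non-root vertex marks one removable edge, so `len(cuts) - 1` prints 2.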
| 56
| 0
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 230
|
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class DualTransformer2DModel(nn.Module):
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, ):
        '''simple docstring'''
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, )
                for _ in range(2)
            ])

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True, ):
        '''simple docstring'''
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
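# Note on the forward pass above: each transformer contributes only its residual
# (encoded_state - input_states); with r = mix_ratio the module returns
#   out = input + r * delta_0 + (1 - r) * delta_1.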
| 230
| 1
|
"""simple docstring"""
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
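    # Sanity check: at x = mu the density reduces to 1 / sqrt(2 * pi * sigma**2),
    # so the standard normal (mu=0, sigma=1) peaks at about 0.3989.
    print(gaussian(0))  # ~0.3989422804014327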
| 171
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    '''simple docstring'''
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        """simple docstring"""
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""roberta-base""", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 171
| 1
|
'''simple docstring'''
import math
import sys
def read_file_binary(file_path: str) -> str:
    '''simple docstring'''
    result = """"""
    try:
        with open(file_path, """rb""") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f'{dat:08b}'
                result += curr_byte
        return result
    except OSError:
        print("""File not accessible""")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    '''simple docstring'''
    lexicon = {"""0""": """0""", """1""": """1"""}
    curr_string, result = """""", """"""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + """0"""

        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["""0""" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + """1"""
        index += 1
        curr_string = """"""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    '''simple docstring'''
    byte_length = 8
    try:
        with open(file_path, """wb""") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("""10000000""")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="""big"""))
    except OSError:
        print("""File not accessible""")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    '''simple docstring'''
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    '''simple docstring'''
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
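    # Example invocation (script and file names are placeholders):
    #   python lempel_ziv_decompress.py compressed.bin restored.bin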
| 89
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
__A : str = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def get_user_input():
    '''simple docstring'''
    compute_environment = _ask_options(
        """In which compute environment are you running?""", ["""This machine""", """AWS (Amazon SageMaker)"""], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser("""config""", description=description)
    else:
        parser = argparse.ArgumentParser("""Accelerate config command""", description=description)
    parser.add_argument(
        """--config_file""", default=None, help=(
            """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
            """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
            """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
            """with 'huggingface'."""
        ), )
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    '''simple docstring'''
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(""".json"""):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f'accelerate configuration saved at {config_file}')
def main():
    '''simple docstring'''
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
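    # In the installed package this parser is wired into the `accelerate` CLI, so
    # the usual entry point is running `accelerate config` and answering the prompts.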
| 89
| 1
|
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """encoder.embed_positions._float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="""cpu""")
    args = m2m_100["""args"""] or m2m_100["""cfg"""]["""model"""]
    state_dict = m2m_100["""model"""]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["""encoder.embed_tokens.weight"""].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="""relu""", )

    state_dict["""shared.weight"""] = state_dict["""decoder.embed_tokens.weight"""]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß)
model.save_pretrained(args.pytorch_dump_folder_path)
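# Example invocation (script name and paths are placeholders):
#   python convert_m2m100_original_checkpoint_to_pytorch.py model.pt ./m2m100_converted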
| 173
|
"""simple docstring"""
import datasets
from .evaluate import evaluate
_UpperCAmelCase = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100,000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
_UpperCAmelCase = """
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
_UpperCAmelCase = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
    def _compute(self, predictions, references):
        '''simple docstring'''
        pred_dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        dataset = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 173
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : str = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Dict = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAlbertForMaskedLM",
"TFAlbertForMultipleChoice",
"TFAlbertForPreTraining",
"TFAlbertForQuestionAnswering",
"TFAlbertForSequenceClassification",
"TFAlbertForTokenClassification",
"TFAlbertMainLayer",
"TFAlbertModel",
"TFAlbertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxAlbertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 354
|
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def b2mb(x) -> int:
    '''Converts a number of bytes into megabytes.'''
    return int(x / 2**20)


class TorchTracemalloc:
    '''Context manager to track the peak GPU memory allocated within its scope.'''

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160, ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        '''glue''', '''mrpc''', split={'''train''': f"train[:{n_train}]", '''validation''': f"validation[:{n_val}]"})

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=False)

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='''max_length''', max_length=128, return_tensors='''pt''')
        return tokenizer.pad(examples, padding='''longest''', return_tensors='''pt''')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('''Memory before entering the train : {}'''.format(b2mb(tracemalloc.begin)))
        accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used))
        accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked))
        accelerator.print(
            '''Total Peak Memory consumed during the train (max): {}'''.format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)))
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, '''peak_memory_utilization.json'''), '''w''') as f:
            json.dump(train_total_peak_memory, f)
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''')
    parser.add_argument(
        '''--model_name_or_path''', type=str, default='''bert-base-cased''', help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=False, )
    parser.add_argument(
        '''--output_dir''', type=str, default='''.''', help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''', )
    parser.add_argument(
        '''--peak_memory_upper_bound''', type=float, default=None, help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''', )
    parser.add_argument(
        '''--n_train''', type=int, default=320, help='''Number of training examples to use.''', )
    parser.add_argument(
        '''--n_val''', type=int, default=160, help='''Number of validation examples to use.''', )
    parser.add_argument(
        '''--num_epochs''', type=int, default=1, help='''Number of train epochs.''', )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
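    # Example launch (the DeepSpeed config file path is a placeholder):
    #   accelerate launch --config_file ds_zero3_config.yaml peak_memory_example.py --num_epochs 1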
| 252
| 0
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class lowercase__ ( _snake_case ):
'''simple docstring'''
A_ : Tuple = """wav2vec2"""
def __init__( self , __snake_case=32 , __snake_case=768 , __snake_case=12 , __snake_case=12 , __snake_case=3072 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=0.1 , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.1 , __snake_case=0.1 , __snake_case=0.02 , __snake_case=1e-5 , __snake_case="group" , __snake_case="gelu" , __snake_case=(512, 512, 512, 512, 512, 512, 512) , __snake_case=(5, 2, 2, 2, 2, 2, 2) , __snake_case=(10, 3, 3, 3, 3, 2, 2) , __snake_case=False , __snake_case=128 , __snake_case=16 , __snake_case=False , __snake_case=True , __snake_case=0.05 , __snake_case=10 , __snake_case=2 , __snake_case=0.0 , __snake_case=10 , __snake_case=0 , __snake_case=320 , __snake_case=2 , __snake_case=0.1 , __snake_case=100 , __snake_case=256 , __snake_case=256 , __snake_case=0.1 , __snake_case="sum" , __snake_case=False , __snake_case=False , __snake_case=256 , __snake_case=(512, 512, 512, 512, 1500) , __snake_case=(5, 3, 3, 1, 1) , __snake_case=(1, 2, 3, 1, 1) , __snake_case=512 , __snake_case=0 , __snake_case=1 , __snake_case=2 , __snake_case=False , __snake_case=3 , __snake_case=2 , __snake_case=3 , __snake_case=None , __snake_case=None , **__snake_case , ):
super().__init__(**__snake_case , pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case )
_SCREAMING_SNAKE_CASE : Dict = hidden_size
_SCREAMING_SNAKE_CASE : Dict = feat_extract_norm
_SCREAMING_SNAKE_CASE : Union[str, Any] = feat_extract_activation
_SCREAMING_SNAKE_CASE : List[str] = list(__snake_case )
_SCREAMING_SNAKE_CASE : int = list(__snake_case )
_SCREAMING_SNAKE_CASE : Dict = list(__snake_case )
_SCREAMING_SNAKE_CASE : Any = conv_bias
_SCREAMING_SNAKE_CASE : List[Any] = num_conv_pos_embeddings
_SCREAMING_SNAKE_CASE : str = num_conv_pos_embedding_groups
_SCREAMING_SNAKE_CASE : Dict = len(self.conv_dim )
_SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
_SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
_SCREAMING_SNAKE_CASE : List[Any] = hidden_act
_SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
_SCREAMING_SNAKE_CASE : Tuple = hidden_dropout
_SCREAMING_SNAKE_CASE : List[Any] = attention_dropout
_SCREAMING_SNAKE_CASE : List[Any] = activation_dropout
_SCREAMING_SNAKE_CASE : Dict = feat_proj_dropout
_SCREAMING_SNAKE_CASE : Optional[Any] = final_dropout
_SCREAMING_SNAKE_CASE : List[str] = layerdrop
_SCREAMING_SNAKE_CASE : str = layer_norm_eps
_SCREAMING_SNAKE_CASE : Any = initializer_range
_SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
_SCREAMING_SNAKE_CASE : Any = do_stable_layer_norm
_SCREAMING_SNAKE_CASE : Tuple = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_SCREAMING_SNAKE_CASE : int = apply_spec_augment
_SCREAMING_SNAKE_CASE : int = mask_time_prob
_SCREAMING_SNAKE_CASE : Dict = mask_time_length
_SCREAMING_SNAKE_CASE : Optional[int] = mask_time_min_masks
_SCREAMING_SNAKE_CASE : Union[str, Any] = mask_feature_prob
_SCREAMING_SNAKE_CASE : Optional[int] = mask_feature_length
_SCREAMING_SNAKE_CASE : Dict = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_SCREAMING_SNAKE_CASE : Dict = num_codevectors_per_group
_SCREAMING_SNAKE_CASE : Optional[int] = num_codevector_groups
_SCREAMING_SNAKE_CASE : str = contrastive_logits_temperature
_SCREAMING_SNAKE_CASE : int = feat_quantizer_dropout
_SCREAMING_SNAKE_CASE : List[Any] = num_negatives
_SCREAMING_SNAKE_CASE : str = codevector_dim
_SCREAMING_SNAKE_CASE : int = proj_codevector_dim
_SCREAMING_SNAKE_CASE : List[str] = diversity_loss_weight
# ctc loss
_SCREAMING_SNAKE_CASE : Dict = ctc_loss_reduction
_SCREAMING_SNAKE_CASE : Dict = ctc_zero_infinity
# adapter
_SCREAMING_SNAKE_CASE : Any = add_adapter
_SCREAMING_SNAKE_CASE : Union[str, Any] = adapter_kernel_size
_SCREAMING_SNAKE_CASE : Tuple = adapter_stride
_SCREAMING_SNAKE_CASE : str = num_adapter_layers
_SCREAMING_SNAKE_CASE : Optional[int] = output_hidden_size or hidden_size
_SCREAMING_SNAKE_CASE : List[Any] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_SCREAMING_SNAKE_CASE : List[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_SCREAMING_SNAKE_CASE : str = list(__snake_case )
_SCREAMING_SNAKE_CASE : List[str] = list(__snake_case )
_SCREAMING_SNAKE_CASE : Optional[int] = list(__snake_case )
_SCREAMING_SNAKE_CASE : List[str] = xvector_output_dim
@property
def UpperCAmelCase_ ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
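# A minimal sketch of what the property above computes: the feature encoder's
# overall downsampling factor is the product of its convolutional strides
# (the stride tuple below is an assumed Wav2Vec2-style default, not taken from
# this snippet).
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
assert functools.reduce(operator.mul, conv_stride, 1) == 320  # 16 kHz audio -> 50 frames/s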
| 200
|
'''simple docstring'''
def optimal_merge_pattern(files: list) -> int:
    """Return the minimum total cost of merging all the given files, greedily
    merging the two smallest files at each step.

    >>> optimal_merge_pattern([2, 3, 4])
    14
    """
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider the two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
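# An equivalent sketch using a min-heap, which avoids the repeated linear
# scans of the list-based version above (an alternative formulation, not part
# of the original snippet):
import heapq

def optimal_merge_pattern_heap(files: list) -> int:
    heapq.heapify(files)
    cost = 0
    while len(files) > 1:
        merged = heapq.heappop(files) + heapq.heappop(files)
        cost += merged
        heapq.heappush(files, merged)
    return cost

assert optimal_merge_pattern_heap([2, 3, 4]) == 14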
| 200
| 1
|
"""simple docstring"""
def solution(length: int = 50) -> int:
    """
    Returns the number of ways a row measuring `length` units can be filled
    with red blocks at least three units long, any two blocks being separated
    by at least one black square (Project Euler problem 114).
    """
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(F'''{solution() = }''')
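# Sanity sketch: the recurrence reproduces the known small values of
# Project Euler problem 114, e.g. a row of length 7 admits 17 fillings.
assert solution(7) == 17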
| 366
|
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a : List[str] = logging.get_logger(__name__)
def lowerCAmelCase_ (lowerCAmelCase__: int ):
"""simple docstring"""
UpperCAmelCase_: str = OrderedDict()
for key, value in state_dict.items():
if key.startswith("""module.encoder""" ):
UpperCAmelCase_: Tuple = key.replace("""module.encoder""" , """glpn.encoder""" )
if key.startswith("""module.decoder""" ):
UpperCAmelCase_: List[str] = key.replace("""module.decoder""" , """decoder.stages""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
UpperCAmelCase_: Optional[Any] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
UpperCAmelCase_: Optional[Any] = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(lowerCAmelCase__ )-1}' )
if "norm" in key:
UpperCAmelCase_: Dict = key.replace("""norm""" , """layer_norm""" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
UpperCAmelCase_: Any = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
UpperCAmelCase_: str = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(lowerCAmelCase__ )-1}' )
if "layer_norm1" in key:
UpperCAmelCase_: Tuple = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
UpperCAmelCase_: int = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
UpperCAmelCase_: Any = key[key.find("""block""" ) + len("""block""" )]
UpperCAmelCase_: Optional[Any] = key.replace(F'block{idx}' , F'block.{int(lowerCAmelCase__ )-1}' )
if "attn.q" in key:
UpperCAmelCase_: Dict = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
UpperCAmelCase_: Tuple = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
UpperCAmelCase_: str = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
UpperCAmelCase_: Tuple = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
UpperCAmelCase_: Optional[int] = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
UpperCAmelCase_: Optional[int] = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
UpperCAmelCase_: Tuple = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
UpperCAmelCase_: List[str] = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
UpperCAmelCase_: Optional[int] = key[key.find("""linear_c""" ) + len("""linear_c""" )]
UpperCAmelCase_: List[str] = key.replace(F'linear_c{idx}' , F'linear_c.{int(lowerCAmelCase__ )-1}' )
if "bot_conv" in key:
UpperCAmelCase_: Optional[Any] = key.replace("""bot_conv""" , """0.convolution""" )
if "skip_conv1" in key:
UpperCAmelCase_: Any = key.replace("""skip_conv1""" , """1.convolution""" )
if "skip_conv2" in key:
UpperCAmelCase_: Optional[Any] = key.replace("""skip_conv2""" , """2.convolution""" )
if "fusion1" in key:
UpperCAmelCase_: Dict = key.replace("""fusion1""" , """1.fusion""" )
if "fusion2" in key:
UpperCAmelCase_: Any = key.replace("""fusion2""" , """2.fusion""" )
if "fusion3" in key:
UpperCAmelCase_: Union[str, Any] = key.replace("""fusion3""" , """3.fusion""" )
if "fusion" in key and "conv" in key:
UpperCAmelCase_: List[Any] = key.replace("""conv""" , """convolutional_layer""" )
if key.startswith("""module.last_layer_depth""" ):
UpperCAmelCase_: Union[str, Any] = key.replace("""module.last_layer_depth""" , """head.head""" )
UpperCAmelCase_: Union[str, Any] = value
return new_state_dict
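# Worked example (a sketch) of the renaming above: a checkpoint key such as
#
#   module.encoder.patch_embed1.proj.weight
#
# becomes
#
#   glpn.encoder.patch_embeddings.0.proj.weight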
def lowerCAmelCase_ (lowerCAmelCase__: List[Any] , lowerCAmelCase__: str ):
"""simple docstring"""
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
UpperCAmelCase_: str = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' )
UpperCAmelCase_: Tuple = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' )
# next, add keys and values (in that order) to the state dict
UpperCAmelCase_: Any = kv_weight[
: config.hidden_sizes[i], :
]
UpperCAmelCase_: Tuple = kv_bias[: config.hidden_sizes[i]]
UpperCAmelCase_: Optional[int] = kv_weight[
config.hidden_sizes[i] :, :
]
UpperCAmelCase_: int = kv_bias[config.hidden_sizes[i] :]
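# Shape sketch for the split above: the original `kv` projection stacks key
# and value, so its weight has shape (2 * hidden_size, hidden_size); the first
# half of the rows becomes the key matrix and the second half the value matrix.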
def lowerCAmelCase_ ():
"""simple docstring"""
UpperCAmelCase_: str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase_: List[Any] = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return image
@torch.no_grad()
def lowerCAmelCase_ (lowerCAmelCase__: List[str] , lowerCAmelCase__: List[Any] , lowerCAmelCase__: List[Any]=False , lowerCAmelCase__: Optional[Any]=None ):
"""simple docstring"""
UpperCAmelCase_: str = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] )
# load image processor (only resize + rescale)
UpperCAmelCase_: Dict = GLPNImageProcessor()
# prepare image
UpperCAmelCase_: List[str] = prepare_img()
UpperCAmelCase_: List[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
logger.info("""Converting model...""" )
# load original state dict
UpperCAmelCase_: Any = torch.load(lowerCAmelCase__ , map_location=torch.device("""cpu""" ) )
# rename keys
UpperCAmelCase_: Optional[Any] = rename_keys(lowerCAmelCase__ )
# key and value matrices need special treatment
read_in_k_v(lowerCAmelCase__ , lowerCAmelCase__ )
# create HuggingFace model and load state dict
UpperCAmelCase_: Dict = GLPNForDepthEstimation(lowerCAmelCase__ )
model.load_state_dict(lowerCAmelCase__ )
model.eval()
# forward pass
UpperCAmelCase_: Any = model(lowerCAmelCase__ )
UpperCAmelCase_: Union[str, Any] = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
UpperCAmelCase_: List[str] = torch.tensor(
[[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
elif "kitti" in model_name:
UpperCAmelCase_: List[str] = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
else:
raise ValueError(F'Unknown model name: {model_name}' )
UpperCAmelCase_: Any = torch.Size([1, 4_8_0, 6_4_0] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , lowerCAmelCase__ , atol=1e-4 )
print("""Looks ok!""" )
# finally, push to hub if required
if push_to_hub:
logger.info("""Pushing model and image processor to the hub...""" )
model.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase__ , lowerCAmelCase__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowerCAmelCase__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase__ , lowerCAmelCase__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowerCAmelCase__ , )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
a : List[Any] = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
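# Example invocation (a sketch; the script and checkpoint file names are
# hypothetical):
#
#   python convert_glpn_to_pytorch.py \
#       --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti \
#       --model_name glpn-kitti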
| 82
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : Union[str, Any] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case__ : int = get_activation('''gelu''' )
self.assertTrue(torch.allclose(gelu_python(__lowercase ) ,torch_builtin(__lowercase ) ) )
self.assertFalse(torch.allclose(gelu_python(__lowercase ) ,gelu_new(__lowercase ) ) )
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : List[Any] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case__ : Union[str, Any] = get_activation('''gelu''' )
snake_case__ : int = get_activation('''gelu_10''' )
snake_case__ : Optional[int] = torch_builtin(__lowercase )
snake_case__ : str = geluaa(__lowercase )
snake_case__ : Tuple = torch.where(y_gelu_aa < 10.0 ,1 ,0 )
self.assertTrue(torch.max(__lowercase ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask ,y_gelu_aa * clipped_mask ) )
def __lowerCamelCase ( self :Any ):
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
with self.assertRaises(__lowercase ):
get_activation('''bogus''' )
with self.assertRaises(__lowercase ):
get_activation(__lowercase )
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : str = get_activation('''gelu''' )
snake_case__ : List[Any] = 1
snake_case__ : Optional[Any] = get_activation('''gelu''' )
self.assertEqual(acta.a ,1 )
with self.assertRaises(__lowercase ):
snake_case__ : str = acta.a
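# Illustrative sketch of the clipping behaviour exercised above: `gelu_10` is
# GELU with outputs clipped to [-10, 10], so it agrees with plain GELU except
# at large magnitudes, e.g.
#
#   x = torch.tensor([0.5, 100.0])
#   get_activation("gelu_10")(x)   # the second entry saturates at 10.0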
| 230
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : Union[str, Any] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case__ : int = get_activation('''gelu''' )
self.assertTrue(torch.allclose(gelu_python(__lowercase ) ,torch_builtin(__lowercase ) ) )
self.assertFalse(torch.allclose(gelu_python(__lowercase ) ,gelu_new(__lowercase ) ) )
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : List[Any] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case__ : Union[str, Any] = get_activation('''gelu''' )
snake_case__ : int = get_activation('''gelu_10''' )
snake_case__ : Optional[int] = torch_builtin(__lowercase )
snake_case__ : str = geluaa(__lowercase )
snake_case__ : Tuple = torch.where(y_gelu_aa < 10.0 ,1 ,0 )
self.assertTrue(torch.max(__lowercase ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask ,y_gelu_aa * clipped_mask ) )
def __lowerCamelCase ( self :Any ):
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
with self.assertRaises(__lowercase ):
get_activation('''bogus''' )
with self.assertRaises(__lowercase ):
get_activation(__lowercase )
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : str = get_activation('''gelu''' )
snake_case__ : List[Any] = 1
snake_case__ : Optional[Any] = get_activation('''gelu''' )
self.assertEqual(acta.a ,1 )
with self.assertRaises(__lowercase ):
snake_case__ : str = acta.a
| 230
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : List[Any] = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class __snake_case ( a ):
UpperCAmelCase__ : List[Any] = '''realm'''
def __init__( self : Union[str, Any] , _snake_case : Union[str, Any]=30522 , _snake_case : int=768 , _snake_case : List[str]=128 , _snake_case : List[str]=12 , _snake_case : Tuple=12 , _snake_case : Tuple=8 , _snake_case : int=3072 , _snake_case : int="gelu_new" , _snake_case : List[str]=0.1 , _snake_case : Dict=0.1 , _snake_case : List[Any]=512 , _snake_case : str=2 , _snake_case : str=0.0_2 , _snake_case : Dict=1e-12 , _snake_case : Union[str, Any]=256 , _snake_case : str=10 , _snake_case : Optional[int]=1e-3 , _snake_case : Dict=5 , _snake_case : Union[str, Any]=320 , _snake_case : Tuple=13353718 , _snake_case : Optional[Any]=5000 , _snake_case : Any=1 , _snake_case : Dict=0 , _snake_case : Optional[int]=2 , **_snake_case : Optional[int] , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case)
# Common config
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = retriever_proj_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = num_candidates
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = layer_norm_eps
# Reader config
UpperCAmelCase_ = span_hidden_size
UpperCAmelCase_ = max_span_width
UpperCAmelCase_ = reader_layer_norm_eps
UpperCAmelCase_ = reader_beam_size
UpperCAmelCase_ = reader_seq_len
# Retrieval config
UpperCAmelCase_ = num_block_records
UpperCAmelCase_ = searcher_beam_size
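# Minimal usage sketch for the configuration above (the public name of this
# class in `transformers` is `RealmConfig`; the values below are illustrative):
#
#   config = RealmConfig(num_candidates=4, searcher_beam_size=1_000)
#   config.hidden_size   # 768 by default, matching BERT-base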
| 7
|
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
snake_case_ : int = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
snake_case_ : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
snake_case_ : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
snake_case_ : Union[str, Any] = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (although we don't have a training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` is used during training (although we don't have a training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def A (__A : List[Any] , __A : Optional[int] , __A : str , __A : Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"""config.{attribute}""" in modeling_source
or F"""getattr(config, \"{attribute}\"""" in modeling_source
or F"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
UpperCAmelCase_ = True
# Deal with multi-line cases
elif (
re.search(
RF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , __A , )
is not None
):
UpperCAmelCase_ = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
UpperCAmelCase_ = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
UpperCAmelCase_ = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
UpperCAmelCase_ = ['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
UpperCAmelCase_ = True
if not attribute_used:
UpperCAmelCase_ = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
UpperCAmelCase_ = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
UpperCAmelCase_ = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
UpperCAmelCase_ = True
elif attribute.endswith('''_token_id''' ):
UpperCAmelCase_ = True
# configuration class specific cases
if not case_allowed:
UpperCAmelCase_ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
UpperCAmelCase_ = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def A (__A : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = dict(inspect.signature(config_class.__init__ ).parameters )
UpperCAmelCase_ = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
UpperCAmelCase_ = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
UpperCAmelCase_ = {}
if len(config_class.attribute_map ) > 0:
UpperCAmelCase_ = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
UpperCAmelCase_ = inspect.getsourcefile(__A )
UpperCAmelCase_ = os.path.dirname(__A )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
UpperCAmelCase_ = [os.path.join(__A , __A ) for fn in os.listdir(__A ) if fn.startswith('''modeling_''' )]
# Get the source code strings
UpperCAmelCase_ = []
for path in modeling_paths:
if os.path.isfile(__A ):
with open(__A ) as fp:
modeling_sources.append(fp.read() )
UpperCAmelCase_ = []
for config_param, default_value in zip(__A , __A ):
# `attributes` here is all the variant names for `config_param`
UpperCAmelCase_ = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(__A , __A , __A , __A ):
unused_attributes.append(attributes[0] )
return sorted(__A )
def A () -> Any:
"""simple docstring"""
UpperCAmelCase_ = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
UpperCAmelCase_ = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda __A : inspect.isclass(__A )
and issubclass(__A , __A )
and inspect.getmodule(__A ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
UpperCAmelCase_ = check_config_attributes_being_used(__A )
if len(__A ) > 0:
UpperCAmelCase_ = unused_attributes
if len(__A ) > 0:
UpperCAmelCase_ = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += F"""{name}: {attributes}\n"""
raise ValueError(__A )
if __name__ == "__main__":
check_config_attributes()
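# Worked example (a sketch) of the multi-line `getattr` usage that the regex
# above is designed to catch, in addition to the plain `config.xxx` form:
#
#   hidden = getattr(
#       self.config, "hidden_size"
#   )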
| 7
| 1
|
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
__lowerCAmelCase = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main() -> None:
_a : List[Any] = Github(os.environ['GITHUB_TOKEN'] )
_a : List[Any] = g.get_repo('huggingface/transformers' )
_a : Tuple = repo.get_issues(state='open' )
for issue in open_issues:
        _a : str = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
_a : List[str] = comments[0] if len(lowerCAmelCase_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
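# A small sketch of the inactivity arithmetic used above (the dates are made
# up): an issue gets the stale comment after more than 23 days without
# activity, and is closed once the bot's own comment has sat unanswered for
# more than 7 further days.
from datetime import datetime as _dt, timedelta as _td

_updated_at = _dt.utcnow() - _td(days=24)
assert (_dt.utcnow() - _updated_at).days > 23  # eligible for the stale comment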
| 89
|
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=1 ) -> Dict:
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Tuple:
_a : Any = []
for old_item in old_list:
_a : Union[str, Any] = old_item.replace('in_layers.0' , 'norm1' )
_a : Optional[int] = new_item.replace('in_layers.2' , 'conv1' )
_a : str = new_item.replace('out_layers.0' , 'norm2' )
_a : List[str] = new_item.replace('out_layers.3' , 'conv2' )
_a : str = new_item.replace('emb_layers.1' , 'time_emb_proj' )
_a : Tuple = new_item.replace('skip_connection' , 'conv_shortcut' )
_a : Any = shave_segments(lowerCAmelCase_ , n_shave_prefix_segments=lowerCAmelCase_ )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Any:
_a : List[str] = []
for old_item in old_list:
_a : List[Any] = old_item
_a : Optional[int] = new_item.replace('norm.weight' , 'group_norm.weight' )
_a : Optional[Any] = new_item.replace('norm.bias' , 'group_norm.bias' )
_a : Any = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
_a : Optional[Any] = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
_a : Optional[int] = shave_segments(lowerCAmelCase_ , n_shave_prefix_segments=lowerCAmelCase_ )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> Any:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_a : Optional[Any] = old_checkpoint[path]
_a : Optional[Any] = old_tensor.shape[0] // 3
_a : Any = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_a : int = old_tensor.shape[0] // config['num_head_channels'] // 3
_a : str = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_a , _a , _a : Tuple = old_tensor.split(channels // num_heads , dim=1 )
_a : Dict = query.reshape(lowerCAmelCase_ )
_a : str = key.reshape(lowerCAmelCase_ )
_a : Optional[int] = value.reshape(lowerCAmelCase_ )
for path in paths:
_a : Dict = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_a : Any = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
_a : str = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
_a : Union[str, Any] = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
_a : int = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_a : List[str] = old_checkpoint[path['old']][:, :, 0]
else:
_a : Dict = old_checkpoint[path['old']]
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_a : Optional[int] = {}
_a : Dict = checkpoint['time_embed.0.weight']
_a : Tuple = checkpoint['time_embed.0.bias']
_a : Union[str, Any] = checkpoint['time_embed.2.weight']
_a : List[str] = checkpoint['time_embed.2.bias']
_a : List[str] = checkpoint['input_blocks.0.0.weight']
_a : Union[str, Any] = checkpoint['input_blocks.0.0.bias']
_a : Optional[int] = checkpoint['out.0.weight']
_a : int = checkpoint['out.0.bias']
_a : List[str] = checkpoint['out.2.weight']
_a : Optional[int] = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
_a : Optional[int] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
_a : Dict = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(lowerCAmelCase_ )
}
# Retrieves the keys for the middle blocks only
_a : List[Any] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
_a : Union[str, Any] = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(lowerCAmelCase_ )
}
# Retrieves the keys for the output blocks only
_a : Optional[int] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
_a : str = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(lowerCAmelCase_ )
}
for i in range(1 , lowerCAmelCase_ ):
_a : List[Any] = (i - 1) // (config['num_res_blocks'] + 1)
_a : Optional[int] = (i - 1) % (config['num_res_blocks'] + 1)
_a : Optional[int] = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
_a : Optional[Any] = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
_a : List[Any] = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
_a : Union[str, Any] = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
_a : Any = renew_resnet_paths(lowerCAmelCase_ )
_a : List[str] = {'old': f"""input_blocks.{i}.0""", 'new': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
_a : Optional[Any] = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path, resnet_op] , config=lowerCAmelCase_ )
if len(lowerCAmelCase_ ):
_a : List[str] = renew_attention_paths(lowerCAmelCase_ )
_a : List[Any] = {
'old': f"""input_blocks.{i}.1""",
'new': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_a : Optional[Any] = {
f"""input_blocks.{i}.1.qkv.bias""": {
'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ , )
_a : str = middle_blocks[0]
_a : Tuple = middle_blocks[1]
_a : Any = middle_blocks[2]
_a : List[Any] = renew_resnet_paths(lowerCAmelCase_ )
assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ )
_a : Any = renew_resnet_paths(lowerCAmelCase_ )
assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ )
_a : int = renew_attention_paths(lowerCAmelCase_ )
_a : int = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ )
for i in range(lowerCAmelCase_ ):
_a : List[str] = i // (config['num_res_blocks'] + 1)
_a : Any = i % (config['num_res_blocks'] + 1)
_a : Union[str, Any] = [shave_segments(lowerCAmelCase_ , 2 ) for name in output_blocks[i]]
_a : Optional[Any] = {}
for layer in output_block_layers:
_a , _a : str = layer.split('.' )[0], shave_segments(lowerCAmelCase_ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(lowerCAmelCase_ )
else:
_a : str = [layer_name]
if len(lowerCAmelCase_ ) > 1:
_a : str = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
_a : Optional[Any] = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
_a : Dict = renew_resnet_paths(lowerCAmelCase_ )
_a : str = renew_resnet_paths(lowerCAmelCase_ )
_a : Optional[int] = {'old': f"""output_blocks.{i}.0""", 'new': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , config=lowerCAmelCase_ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_a : List[Any] = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
_a : Tuple = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
_a : List[str] = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(lowerCAmelCase_ ) == 2:
_a : Union[str, Any] = []
if len(lowerCAmelCase_ ):
_a : Tuple = renew_attention_paths(lowerCAmelCase_ )
_a : str = {
'old': f"""output_blocks.{i}.1""",
'new': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_a : List[Any] = {
f"""output_blocks.{i}.1.qkv.bias""": {
'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=lowerCAmelCase_ , )
else:
_a : List[Any] = renew_resnet_paths(lowerCAmelCase_ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_a : int = '.'.join(['output_blocks', str(lowerCAmelCase_ ), path['old']] )
_a : Union[str, Any] = '.'.join(['up_blocks', str(lowerCAmelCase_ ), 'resnets', str(lowerCAmelCase_ ), path['new']] )
_a : Union[str, Any] = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__lowerCAmelCase = json.loads(f.read())
__lowerCAmelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__lowerCAmelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__lowerCAmelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__lowerCAmelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__lowerCAmelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 89
| 1
|
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
UpperCamelCase_ = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
UpperCamelCase_ = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def UpperCamelCase ( UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = numpy.dtype(numpy.uintaa ).newbyteorder(">" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=UpperCAmelCase )[0]
@deprecated(UpperCAmelCase , "Please use tf.data to implement this functionality." )
def UpperCamelCase ( UpperCAmelCase ) ->str:
"""simple docstring"""
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=UpperCAmelCase ) as bytestream:
a_ = _readaa(UpperCAmelCase )
if magic != 2_051:
raise ValueError(
"Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
a_ = _readaa(UpperCAmelCase )
a_ = _readaa(UpperCAmelCase )
a_ = _readaa(UpperCAmelCase )
a_ = bytestream.read(rows * cols * num_images )
a_ = numpy.frombuffer(UpperCAmelCase , dtype=numpy.uinta )
a_ = data.reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , 1 )
return data
@deprecated(UpperCAmelCase , "Please use tf.one_hot on tensors." )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Any:
"""simple docstring"""
a_ = labels_dense.shape[0]
a_ = numpy.arange(UpperCAmelCase ) * num_classes
a_ = numpy.zeros((num_labels, num_classes) )
a_ = 1
return labels_one_hot
@deprecated(UpperCAmelCase , "Please use tf.data to implement this functionality." )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=10 ) ->str:
"""simple docstring"""
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=UpperCAmelCase ) as bytestream:
a_ = _readaa(UpperCAmelCase )
if magic != 2_049:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
a_ = _readaa(UpperCAmelCase )
a_ = bytestream.read(UpperCAmelCase )
a_ = numpy.frombuffer(UpperCAmelCase , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(UpperCAmelCase , UpperCAmelCase )
return labels
class snake_case :
@deprecated(
__UpperCAmelCase , "Please use alternatives such as official/mnist/_DataSet.py"
" from tensorflow/models." , )
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=dtypes.floataa , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->List[str]:
a_ , a_ = random_seed.get_seed(__UpperCAmelCase)
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda)
a_ = dtypes.as_dtype(__UpperCAmelCase).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
if fake_data:
a_ = 1_00_00
a_ = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F'''images.shape: {images.shape} labels.shape: {labels.shape}'''
a_ = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
a_ = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2])
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
a_ = images.astype(numpy.floataa)
a_ = numpy.multiply(__UpperCAmelCase , 1.0 / 255.0)
a_ = images
a_ = labels
a_ = 0
a_ = 0
@property
def UpperCAmelCase__ ( self) ->List[str]:
return self._images
@property
def UpperCAmelCase__ ( self) ->Any:
return self._labels
@property
def UpperCAmelCase__ ( self) ->Tuple:
return self._num_examples
@property
def UpperCAmelCase__ ( self) ->int:
return self._epochs_completed
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True) ->str:
if fake_data:
a_ = [1] * 7_84
a_ = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(__UpperCAmelCase)],
[fake_label for _ in range(__UpperCAmelCase)],
)
a_ = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
a_ = numpy.arange(self._num_examples)
numpy.random.shuffle(__UpperCAmelCase)
a_ = self.images[perma]
a_ = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
a_ = self._num_examples - start
a_ = self._images[start : self._num_examples]
a_ = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
a_ = numpy.arange(self._num_examples)
numpy.random.shuffle(__UpperCAmelCase)
a_ = self.images[perm]
a_ = self.labels[perm]
# Start next epoch
a_ = 0
a_ = batch_size - rest_num_examples
a_ = self._index_in_epoch
a_ = self._images[start:end]
a_ = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0),
)
else:
self._index_in_epoch += batch_size
a_ = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(UpperCAmelCase , "Please write your own downloading logic." )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
if not gfile.Exists(UpperCAmelCase ):
gfile.MakeDirs(UpperCAmelCase )
a_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
if not gfile.Exists(UpperCAmelCase ):
urllib.request.urlretrieve(UpperCAmelCase , UpperCAmelCase ) # noqa: S310
with gfile.GFile(UpperCAmelCase ) as f:
a_ = f.size()
print("Successfully downloaded" , UpperCAmelCase , UpperCAmelCase , "bytes." )
return filepath
@deprecated(
UpperCAmelCase , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=dtypes.floataa , UpperCAmelCase=True , UpperCAmelCase=5_000 , UpperCAmelCase=None , UpperCAmelCase=DEFAULT_SOURCE_URL , ) ->Optional[Any]:
"""simple docstring"""
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=UpperCAmelCase , one_hot=UpperCAmelCase , dtype=UpperCAmelCase , seed=UpperCAmelCase )
a_ = fake()
a_ = fake()
a_ = fake()
return _Datasets(train=UpperCAmelCase , validation=UpperCAmelCase , test=UpperCAmelCase )
if not source_url: # empty string check
a_ = DEFAULT_SOURCE_URL
a_ = "train-images-idx3-ubyte.gz"
a_ = "train-labels-idx1-ubyte.gz"
a_ = "t10k-images-idx3-ubyte.gz"
a_ = "t10k-labels-idx1-ubyte.gz"
a_ = _maybe_download(
UpperCAmelCase , UpperCAmelCase , source_url + train_images_file )
with gfile.Open(UpperCAmelCase , "rb" ) as f:
a_ = _extract_images(UpperCAmelCase )
a_ = _maybe_download(
UpperCAmelCase , UpperCAmelCase , source_url + train_labels_file )
with gfile.Open(UpperCAmelCase , "rb" ) as f:
a_ = _extract_labels(UpperCAmelCase , one_hot=UpperCAmelCase )
a_ = _maybe_download(
UpperCAmelCase , UpperCAmelCase , source_url + test_images_file )
with gfile.Open(UpperCAmelCase , "rb" ) as f:
a_ = _extract_images(UpperCAmelCase )
a_ = _maybe_download(
UpperCAmelCase , UpperCAmelCase , source_url + test_labels_file )
with gfile.Open(UpperCAmelCase , "rb" ) as f:
a_ = _extract_labels(UpperCAmelCase , one_hot=UpperCAmelCase )
if not 0 <= validation_size <= len(UpperCAmelCase ):
a_ = (
"Validation size should be between 0 and "
F'''{len(UpperCAmelCase )}. Received: {validation_size}.'''
)
raise ValueError(UpperCAmelCase )
a_ = train_images[:validation_size]
a_ = train_labels[:validation_size]
a_ = train_images[validation_size:]
a_ = train_labels[validation_size:]
a_ = {"dtype": dtype, "reshape": reshape, "seed": seed}
a_ = _DataSet(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
a_ = _DataSet(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
a_ = _DataSet(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
return _Datasets(train=UpperCAmelCase , validation=UpperCAmelCase , test=UpperCAmelCase )
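# Usage sketch (in the original TensorFlow module the public entry point is
# named `read_data_sets`; the directory below is hypothetical):
#
#   datasets = read_data_sets("/tmp/mnist_data", one_hot=True, validation_size=5_000)
#   images, labels = datasets.train.next_batch(100)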
| 303
|
"""simple docstring"""
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution(num_picked: int = 20) -> str:
    """
    Returns the expected number of distinct colours among `num_picked` balls
    drawn from an urn of NUM_BALLS balls, NUM_COLOURS colours with
    BALLS_PER_COLOUR balls of each (Project Euler problem 493).
    """
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
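# Sanity sketch: with the defaults above (70 balls, 7 colours, 20 picked) the
# expected number of distinct colours should come out as 6.818741802, the
# published answer to Project Euler problem 493.
# assert solution() == "6.818741802"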
| 303
| 1
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCamelCase__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
lowerCamelCase__ = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
lowerCamelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def __lowerCAmelCase (_UpperCamelCase ):
with open(_UpperCamelCase , 'rb' ) as f:
__lowerCAmelCase : Optional[int] = Image.open(_UpperCamelCase )
return im.convert('RGB' )
@dataclass
class A__ :
A_ : Optional[str] = field(
default=_lowerCamelCase , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
A_ : Optional[str] = field(
default=_lowerCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
A_ : Optional[str] = field(default=_lowerCamelCase , metadata={'help': 'A folder containing the training data.'})
A_ : Optional[str] = field(default=_lowerCamelCase , metadata={'help': 'A folder containing the validation data.'})
A_ : Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'})
A_ : Optional[int] = field(
default=_lowerCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
A_ : Optional[int] = field(
default=_lowerCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def __lowerCamelCase ( self ):
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'You must specify either a dataset name from the hub or a train and/or validation directory.' )
@dataclass
class A__ :
A_ : str = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
A_ : Optional[str] = field(
default=_lowerCamelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_lowerCamelCase)} , )
A_ : Optional[str] = field(
default=_lowerCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
A_ : Optional[str] = field(
default=_lowerCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'})
A_ : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
A_ : str = field(default=_lowerCamelCase , metadata={'help': 'Name or path of preprocessor config.'})
A_ : bool = field(
default=_lowerCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
A_ : bool = field(
default=_lowerCamelCase , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : List[Any] = torch.stack([example['pixel_values'] for example in examples] )
__lowerCAmelCase : Dict = torch.tensor([example['labels'] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def __lowerCAmelCase ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCAmelCase : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_image_classification' , _UpperCamelCase , _UpperCamelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__lowerCAmelCase : str = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
__lowerCAmelCase : int = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        """Computes accuracy on a batch of predictions."""
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch

    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
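# Illustrative CLI invocation of the fine-tuning script above (dataset name and
# paths are placeholders, not taken from this file):
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --model_name_or_path google/vit-base-patch16-224-in21k \
#       --output_dir ./vit-finetuned \
#       --do_train --do_eval \
#       --overwrite_output_dir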
| 86
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a Whisper feature extractor that turns raw audio into log-mel spectrogram features.
    """

    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        """Compute the log-mel spectrogram of the provided audio."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values

    def __call__(
        self,
        raw_speech,
        truncation=True,
        pad_to_multiple_of=None,
        return_tensors=None,
        return_attention_mask=None,
        padding="max_length",
        max_length=None,
        sampling_rate=None,
        do_normalize=None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one or several sequence(s) of raw audio."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({"input_features": raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        """Serializes this instance to a dict, dropping the (large) mel filter bank."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
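# A minimal usage sketch (assumes the class above, as restored, is importable;
# the demo values below are illustrative, not from the original file):
if __name__ == "__main__":
    extractor = WhisperFeatureExtractor()
    waveform = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
    features = extractor(waveform, sampling_rate=16000, return_tensors="np")
    print(features["input_features"].shape)  # (1, 80, 3000): 80 mel bins x 3000 frames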
| 252
| 0
|
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export a PyTorch module to ONNX, handling the pre-1.11 export signature."""
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """Convert each component of a Stable Diffusion pipeline to ONNX and save the result."""
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)
    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder
    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet weights can exceed the 2GB protobuf limit
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet
    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae
    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(
                    1,
                    clip_num_channels,
                    clip_image_size,
                    clip_image_size,
                ).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None
    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )
    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)
    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
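# Illustrative invocation (the checkpoint name and output path are placeholders):
#
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./sd_onnx \
#       --opset 14
#
# Add --fp16 on a CUDA machine to export half-precision weights instead.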
| 146
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    r"""
    Configuration class for a RoBERTa-PreLayerNorm model.
    """

    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
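# A minimal usage sketch (relies on the class names restored above; the values
# passed are illustrative):
if __name__ == "__main__":
    config = RobertaPreLayerNormConfig(num_hidden_layers=6, hidden_size=384)
    print(config.model_type, config.num_hidden_layers)  # roberta-prelayernorm 6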
| 146
| 1
|
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    r"""
    Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer into a single processor.
    """

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """Processes audio and text inputs, as well as audio and text targets."""
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?")
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?")
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs

    def pad(self, *args, **kwargs):
        """Collates audio and text inputs, as well as their targets, into a padded batch."""
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)
        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.")
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to SpeechT5Tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to SpeechT5Tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
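# Usage sketch (the checkpoint is the public SpeechT5 TTS model; loading it
# requires network access, so this is shown as a comment):
#
#   from transformers import SpeechT5Processor
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   inputs = processor(text="Hello world", return_tensors="pt")
#
# Passing both `audio` and `text` raises a ValueError, as enforced in __call__ above.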
| 36
|
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict):
    """Rename the keys of the original GLPN state dict to match the HuggingFace implementation."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict


def read_in_k_v(state_dict, config):
    """Split the fused key/value matrix of each attention block into separate key and value weights."""
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]


def prepare_img():
    """Fetch a COCO image on which the converted model will be verified."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Copy/paste/tweak the original GLPN weights into the HuggingFace GLPN structure."""
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]])
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]])
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
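# Illustrative invocation (the checkpoint path is a placeholder):
#
#   python convert_glpn_to_pytorch.py \
#       --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti \
#       --model_name glpn-kitti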
| 82
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy")
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 354
|
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
def strabool(v):
    """Parse a truthy/falsy string into a bool (used for the --class_cond flag)."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    """Copy one ResNet block from the original checkpoint layout into the diffusers layout."""
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    """Split the fused qkv projection of one attention block into diffusers' separate q/k/v weights."""
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(unet_path, unet_config):
    """Map an OpenAI consistency-models UNet checkpoint onto the diffusers UNet2DModel layout."""
    checkpoint = torch.load(unet_path, map_location="cpu")
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    current_layer = 0
    up_block_types = unet_config["up_block_types"]
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
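# Illustrative invocation (the unet.pt path is a placeholder; the file name must
# contain one of the tags matched above, e.g. "cd" and "imagenet64"):
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path ./cd_imagenet64_l2.pt \
#       --dump_path ./consistency-model \
#       --class_cond True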
| 271
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    r"""
    Configuration class for REALM models (embedder, encoder, scorer, reader and open-QA variants).
    """

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
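# Quick sanity check of the defaults restored above:
if __name__ == "__main__":
    config = RealmConfig()
    print(config.num_candidates, config.searcher_beam_size)  # 8 5000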
| 7
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    r"""
    Configuration class for ResNet models and backbones.
    """

    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
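# Small usage sketch (a ResNet-18-style layout; the values are illustrative):
if __name__ == "__main__":
    config = ResNetConfig(layer_type="basic", depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512])
    print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']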
| 7
| 1
|
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
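# Sanity checks: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26; solution(1000) is the
# classic Project Euler problem 16 answer.
assert solution(15) == 26
assert solution(1000) == 1366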
| 355
|
'''simple docstring'''
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its 1-based position in the alphabet."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Invert encode(): map 1-based alphabet positions back to letters."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
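# Example round-trip with the a=1 ... z=26 mapping above:
assert encode("hello") == [8, 5, 12, 12, 15]
assert decode([8, 5, 12, 12, 15]) == "hello"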
| 337
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 303
|
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    """Shortest path on a 0/1 grid (1 = passable) using Dijkstra's algorithm with a binary heap."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None
    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
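    # Demo (names as restored above): ones mark passable cells; the shortest
    # 4-connected path from (0, 0) to (1, 1) on a 2x2 grid has length 2.
    demo_dist, demo_path = dijkstra(np.ones((2, 2), dtype=int), (0, 0), (1, 1), allow_diagonal=False)
    assert demo_dist == 2.0 and demo_path[0] == (0, 0) and demo_path[-1] == (1, 1)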
| 303
| 1
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered dictionary mapping token -> index."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Greedy longest-match-first tokenization of a single token against the vocab."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
class __snake_case ( a ):
UpperCAmelCase__ : Dict = VOCAB_FILES_NAMES
UpperCAmelCase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : str = ['''input_ids''', '''attention_mask''']
UpperCAmelCase__ : List[Any] = False
def __init__( self : List[str] , _snake_case : Union[str, Any] , _snake_case : Optional[int]="<d>" , _snake_case : Tuple="</d>" , _snake_case : int="<s>" , _snake_case : Tuple="</s>" , _snake_case : List[str]="<pad>" , _snake_case : Optional[int]="<unk>" , _snake_case : Optional[Any]="</n>" , _snake_case : str="</_>" , _snake_case : Any="left" , **_snake_case : Optional[int] , ):
"""simple docstring"""
requires_backends(self , ['''jieba'''])
super().__init__(
bod_token=_snake_case , eod_token=_snake_case , bos_token=_snake_case , eos_token=_snake_case , pad_token=_snake_case , unk_token=_snake_case , line_token=_snake_case , space_token=_snake_case , padding_side=_snake_case , **_snake_case , )
UpperCAmelCase_ = bod_token
UpperCAmelCase_ = eod_token
UpperCAmelCase_ = load_vocab(_snake_case)
UpperCAmelCase_ = self.encoder[space_token]
UpperCAmelCase_ = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
UpperCAmelCase_ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x: x[1]))
UpperCAmelCase_ = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_ = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token)
@property
def lowerCamelCase ( self : int):
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def lowerCamelCase ( self : int):
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
return self.encoder["\n"]
@property
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
return len(self.encoder)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def lowerCamelCase ( self : List[str] , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = []
for x in jieba.cut(_snake_case , cut_all=_snake_case):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(_snake_case))
return output_tokens
def lowerCamelCase ( self : Optional[int] , _snake_case : Union[str, Any] , **_snake_case : Any):
"""simple docstring"""
UpperCAmelCase_ = [i for i in token_ids if i >= 0]
UpperCAmelCase_ = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_snake_case , **_snake_case)
def lowerCamelCase ( self : List[str] , _snake_case : Union[str, Any]):
"""simple docstring"""
return token in self.encoder
def lowerCamelCase ( self : List[str] , _snake_case : List[str]):
"""simple docstring"""
return "".join(_snake_case)
def lowerCamelCase ( self : Optional[Any] , _snake_case : Dict):
"""simple docstring"""
return self.encoder.get(_snake_case , self.encoder.get(self.unk_token))
def lowerCamelCase ( self : Optional[int] , _snake_case : Any):
"""simple docstring"""
return self.decoder.get(_snake_case , self.unk_token)
def lowerCamelCase ( self : Tuple , _snake_case : str , _snake_case : Optional[str] = None):
"""simple docstring"""
if os.path.isdir(_snake_case):
UpperCAmelCase_ = os.path.join(
_snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
else:
UpperCAmelCase_ = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
UpperCAmelCase_ = 0
if " " in self.encoder:
UpperCAmelCase_ = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
UpperCAmelCase_ = self.encoder['''\n''']
del self.encoder["\n"]
UpperCAmelCase_ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x: x[1]))
with open(_snake_case , '''w''' , encoding='''utf-8''') as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''')
UpperCAmelCase_ = token_index
writer.write(token + '''\n''')
index += 1
return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
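The `WordpieceTokenizer.tokenize` loop above is a greedy longest-match scan: it repeatedly takes the longest prefix that exists in the vocabulary, or emits the unknown token and advances one character. A self-contained illustration with a toy vocabulary (all names here are hypothetical):

```python
# Toy re-implementation of the greedy longest-match loop above (illustrative only).
vocab = {"un": 0, "happi": 1, "ness": 2}


def greedy_longest_match(word: str, vocab: dict, unk: str = "<unk>") -> list:
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        cur = None
        while start < end:  # shrink the window until a vocabulary entry matches
            if word[start:end] in vocab:
                cur = word[start:end]
                break
            end -= 1
        if cur is None:  # nothing matched: emit the unknown token, advance one character
            pieces.append(unk)
            start += 1
        else:
            pieces.append(cur)
            start = end
    return pieces


print(greedy_longest_match("unhappiness", vocab))  # ['un', 'happi', 'ness']
```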
| 7
|
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
snake_case_ : int = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
snake_case_ : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
snake_case_ : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
snake_case_ : Union[str, Any] = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (despite we don't have training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check if any name in `attributes` is used in one of the strings in `source_strings`."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Return the sorted list of unused attributes for one configuration class."""
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Check unused attributes for all configuration classes and raise if any are found."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
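To see how the usage check fires, here is a toy demonstration of the same substring and regex tests on a fabricated modeling source (the strings are illustrative, not from the repo):

```python
# Illustrative only: how the attribute-usage check fires on a toy source string.
import re

modeling_source = 'hidden = getattr(self.config, "hidden_size", 768)\nx = config.num_layers'

for attribute in ["hidden_size", "num_layers", "unused_option"]:
    direct = f"config.{attribute}" in modeling_source
    via_getattr = (
        re.search(rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"', modeling_source)
        is not None
    )
    print(attribute, direct or via_getattr)  # hidden_size True, num_layers True, unused_option False
```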
| 7
| 1
|
def solution(n: int = 1000) -> int:
    """Return the sum of all the multiples of 3 or 5 below n."""
    a = 3
    result = 0
    while a < n:
        # every multiple of 15 is already a multiple of 3, so a single test suffices
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(f"{solution() = }")
| 146
|
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """Calculate the apparent power in a single-phase AC circuit (angles in degrees)."""
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)
    # Convert voltage and current to rectangular (complex) form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)
    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
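Example usage with illustrative values: 100 V at 0 degrees and 5 A at 90 degrees give an apparent power of magnitude 500 at phase pi/2:

```python
s = apparent_power(100, 5, 0, 90)
print(abs(s), cmath.phase(s))  # 500.0 and ~1.5708 (pi/2), up to floating-point error
```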
| 146
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_distilbert"] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_distilbert"] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_distilbert"] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
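The pattern above maps each submodule to the names it exports, keeps real imports under `TYPE_CHECKING` for static analysis, and defers runtime imports to a lazy module object. A minimal sketch of the idea (not the actual `_LazyModule` implementation, and the wiring into a package `__init__` is omitted):

```python
# Sketch of a lazy module loader: attributes import their submodule on first access.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._name_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so the import happens only once
        return value
```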
| 362
|
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: List[Any] = CanineTokenizer
lowerCamelCase__: Optional[int] = False
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
super().setUp()
__UpperCAmelCase : Tuple = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]:
return CanineTokenizer.from_pretrained("google/canine-s" )
def _lowerCamelCase ( self: Any , **__lowerCamelCase: List[Any] ) -> CanineTokenizer:
__UpperCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
__UpperCAmelCase : Optional[int] = 10_24
return tokenizer
@require_torch
def _lowerCamelCase ( self: List[str] ) -> int:
__UpperCAmelCase : Union[str, Any] = self.canine_tokenizer
__UpperCAmelCase : List[str] = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
__UpperCAmelCase : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
__UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
__UpperCAmelCase : Optional[Any] = self.canine_tokenizer
__UpperCAmelCase : Dict = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
__UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , __lowerCamelCase )
self.assertIn("attention_mask" , __lowerCamelCase )
self.assertIn("token_type_ids" , __lowerCamelCase )
@require_torch
def _lowerCamelCase ( self: Any ) -> List[str]:
__UpperCAmelCase : Optional[Any] = self.canine_tokenizer
__UpperCAmelCase : int = [
"What's the weater?",
"It's about 25 degrees.",
]
__UpperCAmelCase : List[Any] = tokenizer(
text_target=__lowerCamelCase , max_length=32 , padding="max_length" , truncation=__lowerCamelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
# safety check on max_len default value so we are sure the test works
__UpperCAmelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__UpperCAmelCase : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : int = tempfile.mkdtemp()
__UpperCAmelCase : List[Any] = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : Tuple = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Dict = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : List[Any] = tempfile.mkdtemp()
__UpperCAmelCase : Optional[int] = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase : str = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__UpperCAmelCase : Tuple = chr(0xE_0_0_7 )
additional_special_tokens.append(__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
__UpperCAmelCase : Optional[int] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : str = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertIn(__lowerCamelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(__lowerCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> Optional[int]:
__UpperCAmelCase : List[Any] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
text, ids = self.get_clean_sequence(tokenizer)
# a special token for Canine can be defined as follows:
__UpperCAmelCase : int = 0xE_0_0_5
__UpperCAmelCase : Tuple = chr(__lowerCamelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
__UpperCAmelCase : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Dict = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : int = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , input_encoded + special_token_id )
__UpperCAmelCase : Optional[int] = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
self.assertTrue(special_token not in decoded )
def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : List[str] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : Optional[int] = chr(0xE_0_0_5 )
__UpperCAmelCase : List[str] = chr(0xE_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__lowerCamelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
__UpperCAmelCase : Tuple = tokenizer.tokenize(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(token_a[0] , __lowerCamelCase )
self.assertEqual(token_a[0] , __lowerCamelCase )
@require_tokenizers
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
__UpperCAmelCase : Any = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
__UpperCAmelCase : Union[str, Any] = 0xE_0_0_6
__UpperCAmelCase : int = chr(__lowerCamelCase )
__UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__lowerCamelCase )
tokenizer.from_pretrained(__lowerCamelCase )
def _lowerCamelCase ( self: Dict ) -> List[str]:
__UpperCAmelCase : str = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
__UpperCAmelCase : Tuple = json.load(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
__UpperCAmelCase : Optional[int] = json.load(__lowerCamelCase )
# a special token for Canine can be defined as follows:
__UpperCAmelCase : Any = 0xE_0_0_6
__UpperCAmelCase : Union[str, Any] = chr(__lowerCamelCase )
__UpperCAmelCase : Dict = [new_token_a]
__UpperCAmelCase : int = [new_token_a]
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__UpperCAmelCase : List[str] = tokenizer_class.from_pretrained(__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__UpperCAmelCase : List[Any] = 0xE_0_0_7
__UpperCAmelCase : List[Any] = chr(__lowerCamelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__UpperCAmelCase : str = [AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )]
__UpperCAmelCase : Dict = tokenizer_class.from_pretrained(
__lowerCamelCase , additional_special_tokens=__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Optional[int] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : int = "hello world"
if self.space_between_special_tokens:
__UpperCAmelCase : Any = "[CLS] hello world [SEP]"
else:
__UpperCAmelCase : Union[str, Any] = input
__UpperCAmelCase : List[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Any = tokenizer.decode(__lowerCamelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__lowerCamelCase , [output, output.lower()] )
def _lowerCamelCase ( self: Dict ) -> Any:
__UpperCAmelCase : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : List[str] = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
__UpperCAmelCase : List[str] = "a"
__UpperCAmelCase : Any = ord(__lowerCamelCase )
for attr in attributes_list:
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [] )
__UpperCAmelCase : Tuple = 0xE_0_0_6
__UpperCAmelCase : Optional[Any] = chr(__lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
pass
def _lowerCamelCase ( self: Any ) -> Any:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: Optional[int] ) -> Any:
pass
def _lowerCamelCase ( self: List[str] ) -> str:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[int]:
pass
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: str ) -> Tuple:
pass
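The expected ids in the first test above reflect CANINE's character-level scheme: each character maps to its Unicode code point, and CLS/SEP come from the private-use area (0xE000 = 57344 and 0xE001 = 57345). A plain-Python illustration of that id scheme (a toy sketch, not the real tokenizer):

```python
# Character-level id scheme used by CANINE, illustrated in plain Python.
CLS, SEP = 0xE000, 0xE001  # the 57344 / 57345 seen in the expected ids above


def toy_canine_encode(text: str) -> list:
    return [CLS] + [ord(ch) for ch in text] + [SEP]


print(toy_canine_encode("Life"))  # [57344, 76, 105, 102, 101, 57345]
```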
| 342
| 0
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Check whether cp is the code point of a CJK character."""
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x2_0000 and cp <= 0x2_a6df) #
or (cp >= 0x2_a700 and cp <= 0x2_b73f) #
or (cp >= 0x2_b740 and cp <= 0x2_b81f) #
or (cp >= 0x2_b820 and cp <= 0x2_ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2_f800 and cp <= 0x2_fa1f) #
): #
return True
return False
def is_chinese(word: str):
    """Return 1 if every character in the word is a CJK character, else 0."""
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    """Collect the multi-character tokens that consist entirely of CJK characters."""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    """Prefix the non-initial characters of known whole words with '##'."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    """For every line, compute the positions of subword pieces that continue a whole Chinese word."""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    """Read the corpus, compute whole-word reference positions, and save them as JSON lines."""
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
main(args)
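A small sanity check of `add_sub_symbol`: if LTP segmented 明天 as one whole word, the second character of the BERT character sequence gets the `##` continuation prefix (toy input, illustrative only):

```python
tokens = ["[CLS]", "明", "天", "好", "[SEP]"]
print(add_sub_symbol(tokens, {"明天"}))  # ['[CLS]', '明', '##天', '好', '[SEP]']
```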
| 102
|
'''simple docstring'''
def neville_interpolate(x_points: list, y_points: list, xa: int) -> list:
    """Evaluate the interpolating polynomial at xa with Neville's algorithm."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
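Example usage with illustrative points: interpolating f(x) = x² and evaluating at x = 2.5, where the exact value is 6.25:

```python
value, table = neville_interpolate([1, 2, 3, 4, 5], [1, 4, 9, 16, 25], 2.5)
print(value)  # 6.25
```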
| 271
| 0
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    """Map original mmsegmentation parameter names to their Transformers counterparts."""
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """Move dct[old] to dct[new]."""
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Load the original checkpoint, rename its keys, verify the logits, and optionally save/push."""
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_UpperCAmelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
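The key-renaming step boils down to `rename_key` moving entries within the state dict. A toy demonstration with fabricated keys:

```python
# Illustrative only: renaming a checkpoint key the same way the converter does.
toy = {"backbone.norm0.weight": 1, "decode_head.conv_seg.bias": 2}
rename_key(toy, "decode_head.conv_seg.bias", "decode_head.classifier.bias")
print(toy)  # {'backbone.norm0.weight': 1, 'decode_head.classifier.bias': 2}
```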
| 370
|
import math
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of squares of the first n natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 192
| 0
|
values = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def decimal_to_hexadecimal(decimal) -> str:
    """Convert a (possibly negative) whole number to a hexadecimal string."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
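Example usage, checked against Python's built-in `hex`:

```python
print(decimal_to_hexadecimal(255))   # 0xff
print(decimal_to_hexadecimal(-256))  # -0x100
assert decimal_to_hexadecimal(255) == hex(255)
```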
| 71
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__a = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
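The checker's core idea: a block annotated `# Copied from ... with A->B` must equal the source block after applying the declared rename. A toy illustration of how the expected copy is derived (fabricated snippet):

```python
import re

reference = "class DDPMSchedulerOutput:\n    prev_sample: torch.FloatTensor\n"
# The checker derives the expected copy by applying the rename declared after "with":
renamed = re.sub("DDPM", "Test", reference)
print(renamed.splitlines()[0])  # class TestSchedulerOutput:
```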
| 337
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mega"] = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 122
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]
    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
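A hypothetical usage sketch: calling the tool runs encode → forward → decode under the hood. The re-export path is an assumption, and instantiating the tool downloads model weights, so treat this as illustrative rather than something to run in CI:

```python
# Assumption: TextToSpeechTool is re-exported from transformers.tools (agents runtime).
from transformers.tools import TextToSpeechTool

tool = TextToSpeechTool()
audio = tool("Hello, world!")  # encode -> forward -> decode, returns a waveform tensor
```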
| 122
| 1
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Load a vocabulary file into an ordered dictionary mapping token -> index."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class A ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any],lowercase_ : Any,lowercase_ : Any="<unk>",lowercase_ : List[Any]=2_0_0 )-> int:
'''simple docstring'''
A__ = vocab
A__ = unk_token
A__ = max_input_chars_per_word
def snake_case__ ( self : int,lowercase_ : Any )-> int:
'''simple docstring'''
A__ = list(lowercase_ )
if len(lowercase_ ) > self.max_input_chars_per_word:
return [self.unk_token]
A__ = 0
A__ = []
while start < len(lowercase_ ):
A__ = len(lowercase_ )
A__ = None
while start < end:
A__ = ''.join(chars[start:end] )
if substr in self.vocab:
A__ = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(lowercase_ )
A__ = end
return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    """CPM-Ant tokenizer: jieba segmentation followed by greedy wordpiece lookup."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # Map the dedicated space/newline tokens onto the literal characters.
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Segment with jieba, then split each segment with the wordpiece tokenizer."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping negative ids and special tokens."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # Restore the dedicated space/newline tokens before writing to disk.
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
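# Minimal usage sketch (not from the original file; "vocab.txt" is a placeholder
# for a real CPM-Ant vocabulary such as the one shipped with openbmb/cpm-ant-10b):
#
#   tokenizer = CpmAntTokenizer("vocab.txt")
#   ids = tokenizer.encode("今天天气真好!")
#   tokenizer.decode(ids)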
| 7
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
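# Illustrative sketch (assumes this module is importable as part of transformers):
# a ResNet-18-style config uses basic blocks and shallower stages.
#
#   config = ResNetConfig(depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic")
#   config.stage_names  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']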
| 7
| 1
|
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}


class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a DetrConfig from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serialize this instance to a Python dictionary, nesting the backbone config."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
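# Illustrative sketch (assumes transformers is installed): building a DETR config
# with a non-timm ResNet backbone resolved through CONFIG_MAPPING.
#
#   config = DetrConfig(use_timm_backbone=False)  # defaults to a ResNet "stage4" backbone
#   config.num_attention_heads                    # 8, aliased to encoder_attention_heads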
| 210
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Read-only fsspec interface to the files of a Hugging Face repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
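# Usage sketch (illustrative; `repo_info` would come from e.g. HfApi().dataset_info):
#
#   fs = HfFileSystem(repo_info=repo_info, token=token)
#   fs.ls("")                         # top-level files and directories of the repo
#   with fs.open("data/train.csv") as f:
#       ...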
| 210
| 1
|
import math
def fx(x: float, a: float) -> float:
    """f(x) = x^2 - a; its root is the square root of a."""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    """f'(x) = 2x."""
    return 2 * x


def get_initial_point(a: float) -> float:
    """Pick a starting point above sqrt(a) by repeated squaring."""
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    """Square root of a computed via Newton-Raphson iteration on f(x) = x^2 - a."""
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
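    # Quick sanity check (added for illustration): Newton's method should match
    # math.sqrt to within the default tolerance.
    print(square_root_iterative(5))  # ~2.23606797749979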
| 34
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 342
| 0
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 180
|
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of n by trial division, smallest factor first."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
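    # Example run (added for illustration): 100 = 2 * 2 * 5 * 5.
    print(prime_factors(100))  # [2, 2, 5, 5]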
| 180
| 1
|
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk the attribute path `key` in the HF model and copy `value` into it."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    """Map every fairseq encoder weight onto the HF Wav2Vec2 model, tracking unused ones."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy a single fairseq feature-extractor conv/layer-norm weight into the HF model."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    """Copy a single fairseq adapter weight (projection, layer norm, or conv layer) into the HF model."""
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """Build a linear LM head whose weights are tied to the given embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Copy/paste/tweak the fairseq checkpoint weights into the transformers design."""
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1_0_2_4, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=2_5_0_0_0_4, type=int, help='''`decoder_start_token_id` of model config''')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
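# Example invocation (illustrative only; the script name and every path below are
# placeholders, not values from the original file):
#
#   python convert_wav2vec2_mbart_checkpoint.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --dict_path ./mbart50/dict.txt \
#       --config_yaml_path ./config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-mbart50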
| 206
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # The custom jieba pre-tokenizer cannot be pickled; swap in the plain BERT one.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # Restore the pickleable BERT pre-tokenizer before serializing to disk.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
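# Usage sketch (illustrative; assumes the junnyu checkpoints above are reachable):
# the custom jieba pre-tokenizer segments Chinese text before wordpiece lookup.
#
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   tokenizer.tokenize("今天天气非常好。")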
| 192
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
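# Worked example of the feature-size arithmetic above (illustrative): with the
# defaults (input_size=1, seven default lags, no static/dynamic/time features),
# _number_of_features = 0 + 0 + 0 + 0 + 1 * 2 = 2, so feature_size = 1 * 7 + 2 = 9.
#
#   config = InformerConfig(prediction_length=24)
#   config.feature_size  # 9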
| 74
|
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Given any two of resistance, reactance and impedance (pass the unknown one as 0),
    solve Z^2 = R^2 + X^2 for the missing quantity."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
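    # Example (added for illustration): a 3-4-5 triangle of R, X, Z.
    print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}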
| 74
| 1
|
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"


class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"

    @slow
    @require_torch_gpu
    def test_model_download(self):
        """Warm up the cache so the training test below does not include download time."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)

    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu

            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check

        assert last_step_stats["val_avg_gen_time"] >= 0.01

        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
| 122
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a tokenized code snippet (None if it is too short)."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Split a code snippet into a set of alphanumeric tokens."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Insert a snippet into the LSH index and attach it to an existing cluster if it collides."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters: MinHash each file, then query/insert into the LSH index."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one representative ("extreme") per group of near-duplicates within a cluster."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Remove near-duplicate files from the dataset, keeping one extreme per cluster."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
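# Usage sketch (illustrative; `ds` is any 🤗 dataset with "content", "repo_name"
# and "path" columns):
#
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)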
| 122
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}


class RemBertTokenizer(PreTrainedTokenizer):
    """Construct a RemBERT tokenizer backed by a SentencePiece model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload it in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        """Tokenize a string into SentencePiece pieces."""
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the SentencePiece vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the SentencePiece vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: `[CLS] A [SEP]` for one sequence, `[CLS] A [SEP] B [SEP]` for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 at sequence-token positions."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type IDs: 0 for the first sequence (plus specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
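A minimal usage sketch for the tokenizer above; the vocab path below is a placeholder for a real SentencePiece model file, not something shipped with this module:

# Hedged usage sketch: "spiece.model" is a hypothetical local path.
tokenizer = RemBertTokenizer(vocab_file="spiece.model")
tokens = tokenizer.tokenize("SentencePiece handles raw text directly.")
ids = tokenizer.convert_tokens_to_ids(tokens)
inputs = tokenizer.build_inputs_with_special_tokens(ids)  # [CLS] ... [SEP]
assert len(inputs) == len(ids) + 2  # one [CLS] and one [SEP] were added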
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

# The modeling submodule is only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
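The `_LazyModule` registration above defers importing the torch-heavy submodules until one of the exported names is first accessed. A toy sketch of the same idea, independent of transformers (all names here are illustrative, not the library's API):

import importlib
import types


class ToyLazyModule(types.ModuleType):
    """Toy stand-in for the lazy-module pattern: attribute -> submodule mapping."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Invert {submodule: [names]} into {name: submodule}.
        self._name_to_module = {n: mod for mod, names in import_structure.items() for n in names}

    def __getattr__(self, item):
        # The submodule is imported only on first attribute access.
        module = importlib.import_module("." + self._name_to_module[item], self.__name__)
        return getattr(module, item)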
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Load a TensorFlow T5 checkpoint into a PyTorch model and save it."""
    # Initialise the PyTorch model from the json config
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
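A hypothetical command-line invocation of the script above; the script filename and all paths are placeholders, not taken from this file:

# python convert_t5_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/t5/model.ckpt \
#     --config_file /path/to/t5/config.json \
#     --pytorch_dump_path /path/to/output_dir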
import argparse
import gc
import json
import os

import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def b2mb(x):
    """Convert bytes to megabytes."""
    return int(x / 2**20)


class TorchTracemalloc:
    """Context manager that tracks the CUDA memory allocated within its scope."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")


def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    """Create train and eval dataloaders on a slice of the GLUE MRPC dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: fall back to DeepSpeed's dummy optimizer when the
    # DeepSpeed config already specifies one.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
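A standalone sketch of the `TorchTracemalloc` context manager outside the training loop, assuming a CUDA device is available (the tensor sizes are arbitrary):

if torch.cuda.is_available():
    with TorchTracemalloc() as tm:
        x = torch.randn(2048, 2048, device="cuda")  # ~16 MB of float32
        y = x @ x                                   # peak also covers the matmul output
    print(f"used: {tm.used} MB, peaked: {tm.peaked} MB")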
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
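Once this module is registered, the exported classes import lazily from the top-level package. A hedged sketch that builds a randomly initialized model from the default config, assuming torch and a transformers version that ships SwiftFormer are installed (no checkpoint download involved):

from transformers import SwiftFormerConfig, SwiftFormerForImageClassification

config = SwiftFormerConfig()                       # default architecture hyper-parameters
model = SwiftFormerForImageClassification(config)  # randomly initialized weights
print(sum(p.numel() for p in model.parameters()))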
from math import ceil


def assert_device_map(device_map, num_blocks):
    """Validate that a device_map covers each attention block exactly once."""
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Return a dictionary distributing the layers evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
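A quick worked example of the two helpers above: 12 layers split evenly across 4 devices, then validated.

device_map = get_device_map(12, [0, 1, 2, 3])
print(device_map)  # {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}
assert_device_map(device_map, 12)  # raises ValueError on duplicate/missing/extra blocks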