"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }


def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        # after reduction the background (0) becomes the ignore index 255, so labels may reach 255
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
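# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original test file): what
# `do_reduce_labels` does to a segmentation map. Background (0) is mapped to the
# ignore index 255 and all other class ids shift down by one, which is why the
# assertions above allow labels up to 255 after reduction. This helper is a
# stand-in for illustration, not the BeitImageProcessor internals.
def _reduce_labels_sketch(segmentation_map):
    reduced = np.asarray(segmentation_map, dtype=np.int64).copy()
    reduced[reduced == 0] = 255  # background becomes the ignore index
    reduced = reduced - 1  # shift the remaining class ids down by one
    reduced[reduced == 254] = 255  # keep the ignore index stable
    return reduced


# Example: ADE20k class 1 becomes 0, background 0 becomes 255.
assert _reduce_labels_sketch([[0, 1], [150, 2]]).tolist() == [[255, 0], [149, 1]]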
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states


@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break

            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res


@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
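# ---------------------------------------------------------------------------
# Minimal usage sketch (added; not part of the original module). PABEE exits
# early at inference once `patience` consecutive internal classifiers agree.
# The checkpoint name and label count below are illustrative assumptions.
#
#     from transformers import BertConfig, BertTokenizer
#
#     config = BertConfig.from_pretrained("bert-base-uncased", num_labels=2)
#     model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased", config=config)
#     model.eval()
#     model.bert.set_patience(3)  # exit once 3 consecutive layers agree
#     model.bert.reset_stats()
#
#     tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#     inputs = tokenizer("A quick sanity check.", return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs)[0]  # logits from the exit layer
#     model.bert.log_stats()  # prints the average number of layers actually used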
def factorial(digit: int) -> int:
    """Return digit! (0! and 1! are both 1)."""
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    """Return True if `number` equals the sum of the factorials of its digits."""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.")
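# Worked example (added): 145 is a Krishnamurthy number because
# 1! + 4! + 5! = 1 + 24 + 120 = 145, whereas 123 is not (1! + 2! + 3! = 9).
#     assert krishnamurthy(145) is True
#     assert krishnamurthy(123) is False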
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare the values at index1 and index2 and swap them as per the given direction."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence into sorted order in the given direction."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort array[low : low + length] in the given direction (1 = ascending, 0 = descending)."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
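# ---------------------------------------------------------------------------
# Illustrative note (added): bitonic sort only works when the slice length is a
# power of two, since `int(length / 2)` must split every level exactly in half.
# A quick check under that assumption:
#
#     data = [3, 7, 4, 8, 6, 2, 1, 5]  # len(data) == 8 == 2**3
#     bitonic_sort(data, 0, len(data), 1)
#     assert data == [1, 2, 3, 4, 5, 6, 7, 8]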
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset. Multiple copies of the same prompt are sent sequentially."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple codes for each task in the dataset."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens


def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
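# ---------------------------------------------------------------------------
# Illustrative invocation (added; flag spellings assume the field names defined
# in `arguments.HumanEvalArguments`):
#
#     accelerate launch human_eval.py --model_ckpt <checkpoint> --do_sample True \
#         --n_samples 200 --batch_size 10 --HF_ALLOW_CODE_EVAL 1 \
#         --output_file eval_results.json
#
# `code_eval` then reports a dict such as {"pass@1": ..., "pass@10": ...}.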
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of 2 ** power (Project Euler problem 16)."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0

    for i in list_num:
        sum_of_num += int(i)

    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)


def skip(test_case):
    "Decorator that skips a test unconditionally."
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    "Decorator marking a test as slow; skipped unless RUN_SLOW is set."
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    "Decorator marking a test that must run only on the CPU."
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    "Decorator marking a test that requires CUDA."
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    "Decorator marking a test that requires an XPU."
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    "Decorator marking a test that requires the `mps` backend."
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    "Decorator marking a test that requires `transformers` and `datasets`."
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    "Decorator marking a test that requires the `bitsandbytes` library."
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    "Decorator marking a test that requires a TPU."
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    "Decorator marking a test that requires exactly one GPU."
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    "Decorator marking a test that requires exactly one XPU."
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    "Decorator marking a test that requires more than one GPU."
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    "Decorator marking a test that requires more than one XPU."
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    "Decorator marking a test that requires `safetensors`."
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    "Decorator marking a test that requires DeepSpeed."
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    "Decorator marking a test that requires FSDP support (torch >= 1.12.0)."
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    "Decorator marking that a test requires a particular torch version or greater; usable with or without arguments."
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    "Decorator marking a test that requires Tensorboard."
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    "Decorator marking a test that requires wandb."
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    "Decorator marking a test that requires comet_ml."
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    "Decorator marking a test that requires at least one tracker, with comet_ml not installed."
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """A TestCase that keeps a single temporary directory for the whole class and wipes it between tests."""

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        "Creates a temporary directory for the class."
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        "Removes `cls.tmpdir` after the test suite has finished."
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        "Destroys all contents of `self.tmpdir`, but not `self.tmpdir` itself."
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
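# ---------------------------------------------------------------------------
# Illustrative usage of the decorators above (added sketch; the module path is
# the one these utilities normally ship under):
#
#     from accelerate.test_utils.testing import require_cuda, slow
#
#     class MyTests(unittest.TestCase):
#         @require_cuda  # skipped unless torch.cuda.is_available()
#         def test_gpu_path(self):
#             ...
#
#         @slow  # runs only when the RUN_SLOW environment variable is set
#         def test_full_training(self):
#             ...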
from ..utils import DummyObject, requires_backends
# NOTE: the original public names of these dummy classes were lost when the file
# was de-identified; the numbered placeholder names below are assumptions. Every
# dummy follows the same pattern: `DummyObject` plus `requires_backends` raises a
# helpful error whenever the class is used without `flax` installed. The
# `from_config`/`from_pretrained` classmethod names follow the standard
# dummy-object template and are likewise assumed.
class FlaxDummyObject1(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject2(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject3(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject4(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject5(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject6(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject7(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject8(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject9(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject10(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject11(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject12(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject13(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
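# ---------------------------------------------------------------------------
# Illustrative note (added): `DummyObject` is a metaclass whose attribute-access
# hook calls `requires_backends`, so these placeholders fail lazily with a clear
# message instead of an opaque error at import time, e.g. (with `flax` absent):
#
#     obj = FlaxDummyObject1()
#     # -> ImportError explaining that this object requires the flax library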
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)


FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
)
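# ---------------------------------------------------------------------------
# Illustrative usage (added note): an auto class looks up the checkpoint's config
# type in the mappings above and instantiates the matching Flax architecture, e.g.
#
#     from transformers import FlaxAutoModel
#
#     model = FlaxAutoModel.from_pretrained("bert-base-cased")  # -> FlaxBertModel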
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = old_name
if "patch_embed" in old_name:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = old_name.split("." )
if layer == "0":
_UpperCAmelCase : List[str] = old_name.replace("0" , "convolution1" )
elif layer == "1":
_UpperCAmelCase : Dict = old_name.replace("1" , "batchnorm_before" )
elif layer == "3":
_UpperCAmelCase : Tuple = old_name.replace("3" , "convolution2" )
else:
_UpperCAmelCase : Tuple = old_name.replace("4" , "batchnorm_after" )
if "network" in old_name and re.search(R"\d\.\d" , __lowerCAmelCase ):
_UpperCAmelCase : List[Any] = R"\b\d{2}\b"
if bool(re.search(__lowerCAmelCase , __lowerCAmelCase ) ):
_UpperCAmelCase : Optional[int] = re.search(R"\d\.\d\d." , __lowerCAmelCase ).group()
else:
_UpperCAmelCase : Any = re.search(R"\d\.\d." , __lowerCAmelCase ).group()
if int(match[0] ) < 6:
_UpperCAmelCase : str = old_name.replace(__lowerCAmelCase , "" )
_UpperCAmelCase : Optional[Any] = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
_UpperCAmelCase : Union[str, Any] = "intermediate_stages." + trimmed_name
else:
_UpperCAmelCase : Tuple = old_name.replace(__lowerCAmelCase , "" )
if int(match[2] ) < num_meta4D_last_stage:
_UpperCAmelCase : Any = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
else:
_UpperCAmelCase : List[str] = str(int(match[2] ) - num_meta4D_last_stage )
_UpperCAmelCase : int = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
_UpperCAmelCase : Tuple = trimmed_name.replace("norm1" , "layernorm1" )
elif "norm2" in old_name:
_UpperCAmelCase : int = trimmed_name.replace("norm2" , "layernorm2" )
elif "fc1" in old_name:
_UpperCAmelCase : Optional[int] = trimmed_name.replace("fc1" , "linear_in" )
elif "fc2" in old_name:
_UpperCAmelCase : List[str] = trimmed_name.replace("fc2" , "linear_out" )
_UpperCAmelCase : Optional[Any] = "last_stage." + trimmed_name
elif "network" in old_name and re.search(R".\d." , __lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = old_name.replace("network" , "intermediate_stages" )
if "fc" in new_name:
_UpperCAmelCase : Union[str, Any] = new_name.replace("fc" , "convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
_UpperCAmelCase : List[Any] = new_name.replace("norm1" , "batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
_UpperCAmelCase : List[Any] = new_name.replace("norm2" , "batchnorm_after" )
if "proj" in new_name:
_UpperCAmelCase : Union[str, Any] = new_name.replace("proj" , "projection" )
if "dist_head" in new_name:
_UpperCAmelCase : List[Any] = new_name.replace("dist_head" , "distillation_classifier" )
elif "head" in new_name:
_UpperCAmelCase : str = new_name.replace("head" , "classifier" )
elif "patch_embed" in new_name:
_UpperCAmelCase : List[str] = "efficientformer." + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
_UpperCAmelCase : List[Any] = new_name.replace("norm" , "layernorm" )
_UpperCAmelCase : Any = "efficientformer." + new_name
else:
_UpperCAmelCase : Dict = "efficientformer.encoder." + new_name
return new_name
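# A minimal sketch (the helper name below is illustrative, not part of this
# script) of how the renamer above is meant to be applied: every key of the
# original state dict is mapped through it, e.g. "patch_embed.0.weight" becomes
# "efficientformer.patch_embed.convolution1.weight".
def _demo_apply_renamer(state_dict, num_meta4d_last_stage, rename_fn):
    # Build a new dict whose keys follow the Hugging Face naming scheme.
    return {rename_fn(key, num_meta4d_last_stage): value for key, value in state_dict.items()}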
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
for key in checkpoint.copy().keys():
_UpperCAmelCase : List[Any] = checkpoint.pop(__lowerCAmelCase )
_UpperCAmelCase : List[Any] = val
return checkpoint
def __lowerCAmelCase ():
_UpperCAmelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase : Tuple = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return image
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = torch.load(__lowerCAmelCase , map_location="cpu" )["model"]
_UpperCAmelCase : Dict = EfficientFormerConfig.from_json_file(__lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = EfficientFormerForImageClassificationWithTeacher(__lowerCAmelCase )
_UpperCAmelCase : Tuple = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
_UpperCAmelCase : Union[str, Any] = config.depths[-1] - config.num_metaad_blocks + 1
_UpperCAmelCase : Optional[int] = convert_torch_checkpoint(__lowerCAmelCase , __lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
_UpperCAmelCase : Optional[Any] = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
# prepare image
_UpperCAmelCase : int = prepare_img()
_UpperCAmelCase : List[str] = 256
_UpperCAmelCase : Optional[int] = 224
_UpperCAmelCase : Tuple = EfficientFormerImageProcessor(
size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
_UpperCAmelCase : Any = processor(images=__lowerCAmelCase , return_tensors="pt" ).pixel_values
# original processing pipeline
_UpperCAmelCase : int = Compose(
[
Resize(__lowerCAmelCase , interpolation=pillow_resamplings["bicubic"] ),
CenterCrop(__lowerCAmelCase ),
ToTensor(),
Normalize(__lowerCAmelCase , __lowerCAmelCase ),
] )
_UpperCAmelCase : Any = image_transforms(__lowerCAmelCase ).unsqueeze(0 )
assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = model(__lowerCAmelCase )
_UpperCAmelCase : Dict = outputs.logits
_UpperCAmelCase : Optional[int] = (1, 1_000)
if "l1" in model_name:
_UpperCAmelCase : List[Any] = torch.Tensor(
[-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9, -0.5_1_2_4, 0.4_1_8_3, -0.6_7_9_3, -1.3_7_7_7, -0.0_8_9_3, -0.7_3_5_8, -2.4_3_2_8] )
assert torch.allclose(logits[0, :10] , __lowerCAmelCase , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
_UpperCAmelCase : List[Any] = torch.Tensor(
[-1.3_1_5_0, -1.5_4_5_6, -1.2_5_5_6, -0.8_4_9_6, -0.7_1_2_7, -0.7_8_9_7, -0.9_7_2_8, -0.3_0_5_2, 0.3_7_5_1, -0.3_1_2_7] )
assert torch.allclose(logits[0, :10] , __lowerCAmelCase , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
_UpperCAmelCase : List[Any] = torch.Tensor(
[-1.0_2_8_3, -1.4_1_3_1, -0.5_6_4_4, -1.3_1_1_5, -0.5_7_8_5, -1.2_0_4_9, -0.7_5_2_8, 0.1_9_9_2, -0.3_8_2_2, -0.0_8_7_8] )
assert logits.shape == expected_shape
else:
raise ValueError(
F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
# Save Checkpoints
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
processor.save_pretrained(__lowerCAmelCase )
print(F"""Processor successfuly saved at {pytorch_dump_path}""" )
if push_to_hub:
print("Pushing model to the hub..." )
model.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=__lowerCAmelCase , )
processor.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=__lowerCAmelCase , )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
lowerCamelCase__ = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
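# Example invocation (the script file name and all paths below are placeholders):
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path efficientformer_l1_300d.pth \
#       --config_file efficientformer_l1_config.json \
#       --pytorch_dump_path efficientformer-l1-300 \
#       --no-push_to_hub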
| 234
| 1
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCAmelCase__ = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
lowerCAmelCase__ = cvtColor(img, COLOR_BGR2GRAY)
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Tuple = cn.convert_to_negative(_UpperCamelCase )
# assert negative_img array for at least one True
assert negative_img.any()
def snake_case_ ( ):
'''simple docstring'''
with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(_UpperCamelCase, 1_10 ) ).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''' )
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : int = canny.gen_gaussian_kernel(9, sigma=1.4 )
# Assert ambiguous array
assert resp.all()
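# For orientation (standard Gaussian definition; the project's kernel code may
# normalise differently): every entry of a Gaussian kernel is strictly positive,
# which is exactly why the resp.all() smoke test above is meaningful.
def _demo_gaussian_weight(x: int, y: int, sigma: float) -> float:
    from math import exp, pi
    # 2D Gaussian density at integer offset (x, y) from the centre; always > 0.
    return exp(-(x * x + y * y) / (2 * sigma * sigma)) / (2 * pi * sigma * sigma)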
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : str = imread('''digital_image_processing/image_data/lena_small.jpg''', 0 )
# assert ambiguous array for all == True
assert canny_img.all()
_lowerCamelCase : Any = canny.canny(_UpperCamelCase )
# assert canny array for at least one True
assert canny_array.any()
def snake_case_ ( ):
'''simple docstring'''
assert gg.gaussian_filter(_UpperCamelCase, 5, sigma=0.9 ).all()
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Any = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
_lowerCamelCase : Tuple = conv.img_convolve(_UpperCamelCase, _UpperCamelCase ).astype(_UpperCamelCase )
assert res.any()
def snake_case_ ( ):
'''simple docstring'''
assert med.median_filter(_UpperCamelCase, 3 ).any()
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase : str = sob.sobel_filter(_UpperCamelCase )
assert grad.any() and theta.any()
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Dict = sp.make_sepia(_UpperCamelCase, 20 )
assert sepia.all()
def snake_case_ ( A_ : str = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
_lowerCamelCase : Dict = bs.Burkes(imread(_UpperCamelCase, 1 ), 1_20 )
burkes.process()
assert burkes.output_img.any()
def snake_case_ ( A_ : str = "digital_image_processing/image_data/lena_small.jpg", ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = rs.NearestNeighbour(imread(_UpperCamelCase, 1 ), 4_00, 2_00 )
nn.process()
assert nn.output.any()
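# A sketch of the nearest-neighbour idea the test above exercises (the actual
# NearestNeighbour class may round differently): output pixel (i, j) simply
# copies the closest source pixel.
def _demo_nearest_neighbour(src, dst_h: int, dst_w: int):
    src_h, src_w = len(src), len(src[0])
    return [
        [src[i * src_h // dst_h][j * src_w // dst_w] for j in range(dst_w)]
        for i in range(dst_h)
    ]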
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : List[Any] = '''digital_image_processing/image_data/lena.jpg'''
# Reading the image and converting it to grayscale.
_lowerCamelCase : Tuple = imread(_UpperCamelCase, 0 )
# Test for get_neighbors_pixel function() return not None
_lowerCamelCase : Any = 0
_lowerCamelCase : Dict = 0
_lowerCamelCase : Union[str, Any] = image[x_coordinate][y_coordinate]
_lowerCamelCase : Union[str, Any] = lbp.get_neighbors_pixel(
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
_lowerCamelCase : int = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
_lowerCamelCase : str = lbp.local_binary_value(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
assert lbp_image.any()
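# Minimal sketch of a local binary pattern value (semantics assumed; the filters
# module may order the neighbours differently): each of the 8 neighbours of a
# pixel contributes one bit, set when the neighbour is at least as bright as the
# centre pixel.
def _demo_lbp_value(patch) -> int:
    # patch: 3x3 grid of intensities with the centre pixel at patch[1][1]
    centre = patch[1][1]
    neighbours = [
        patch[0][0], patch[0][1], patch[0][2], patch[1][2],
        patch[2][2], patch[2][1], patch[2][0], patch[1][0],
    ]
    return sum(1 << bit for bit, value in enumerate(neighbours) if value >= centre)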
| 359
|
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
lowerCAmelCase__ = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def snake_case_ ( A_ : Any ):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
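# For reference (a sketch, not the Distiller implementation): the assertions above
# guarantee that the combined distillation objective
#     loss = alpha_ce * ce + alpha_mlm * mlm + alpha_clm * clm + alpha_mse * mse + alpha_cos * cos
# has at least one strictly positive weight, and that exactly one of the MLM and
# CLM language-modelling losses is active.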
def snake_case_ ( A_ : Dict, A_ : Any ):
'''simple docstring'''
if args.student_type == "roberta":
_lowerCamelCase : List[str] = False
elif args.student_type == "gpt2":
_lowerCamelCase : Any = False
def snake_case_ ( A_ : Optional[Any], A_ : List[Any] ):
'''simple docstring'''
if args.student_type == "roberta":
_lowerCamelCase : Optional[int] = False
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''', action='''store_true''', help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''', type=A_, required=A_, help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''', type=A_, required=A_, help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''', )
parser.add_argument(
'''--student_type''', type=A_, choices=['''distilbert''', '''roberta''', '''gpt2'''], required=A_, help='''The student type (DistilBERT, RoBERTa).''', )
parser.add_argument('''--student_config''', type=A_, required=A_, help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''', default=A_, type=A_, help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''', choices=['''bert''', '''roberta''', '''gpt2'''], required=A_, help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''', type=A_, required=A_, help='''The teacher model.''' )
parser.add_argument('''--temperature''', default=2.0, type=A_, help='''Temperature for the distillation softmax.''' )
parser.add_argument(
'''--alpha_ce''', default=0.5, type=A_, help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''', default=0.0, type=A_, help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''', )
parser.add_argument('''--alpha_clm''', default=0.5, type=A_, help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''', default=0.0, type=A_, help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''', default=0.0, type=A_, help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''', action='''store_true''', help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''', default=0.15, type=A_, help='''Proportion of tokens for which we need to make a prediction.''', )
parser.add_argument('''--word_mask''', default=0.8, type=A_, help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''', default=0.1, type=A_, help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''', default=0.1, type=A_, help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''', default=0.7, type=A_, help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''', )
parser.add_argument('''--token_counts''', type=A_, help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''', action='''store_true''', help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''', )
parser.add_argument(
'''--freeze_pos_embs''', action='''store_true''', help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''', )
parser.add_argument(
'''--freeze_token_type_embds''', action='''store_true''', help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''', )
parser.add_argument('''--n_epoch''', type=A_, default=3, help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''', type=A_, default=5, help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''', action='''store_false''', help='''If true, group sequences that have similar length into the same batch. Default is true.''', )
parser.add_argument(
'''--gradient_accumulation_steps''', type=A_, default=50, help='''Gradient accumulation for larger training batches.''', )
parser.add_argument('''--warmup_prop''', default=0.05, type=A_, help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''', default=0.0, type=A_, help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''', default=5E-4, type=A_, help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''', default=1E-6, type=A_, help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''', default=5.0, type=A_, help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''', default=0.02, type=A_, help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''', action='''store_true''', help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''', )
parser.add_argument(
'''--fp16_opt_level''', type=A_, default='''O1''', help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
), )
parser.add_argument('''--n_gpu''', type=A_, default=1, help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''', type=A_, default=-1, help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''', type=A_, default=56, help='''Random seed''' )
parser.add_argument('''--log_interval''', type=A_, default=5_00, help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''', type=A_, default=40_00, help='''Checkpoint interval.''' )
_lowerCamelCase : List[Any] = parser.parse_args()
sanity_checks(A_ )
# ARGS #
init_gpu_params(A_ )
set_seed(A_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path, '''parameters.json''' ), '''w''' ) as f:
json.dump(vars(A_ ), A_, indent=4 )
git_log(args.dump_path )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = MODEL_CLASSES[args.student_type]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
_lowerCamelCase : Optional[int] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
_lowerCamelCase : List[Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
_lowerCamelCase : Optional[int] = tokenizer.all_special_tokens.index(A_ )
_lowerCamelCase : Union[str, Any] = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
_lowerCamelCase : Optional[Any] = special_tok_ids
_lowerCamelCase : str = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file, '''rb''' ) as fp:
_lowerCamelCase : Any = pickle.load(A_ )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts, '''rb''' ) as fp:
_lowerCamelCase : str = pickle.load(A_ )
_lowerCamelCase : List[Any] = np.maximum(A_, 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
_lowerCamelCase : List[Any] = 0.0 # do not predict special tokens
_lowerCamelCase : str = torch.from_numpy(A_ )
else:
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : Any = LmSeqsDataset(params=A_, data=A_ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
_lowerCamelCase : str = student_config_class.from_pretrained(args.student_config )
_lowerCamelCase : Union[str, Any] = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
_lowerCamelCase : Dict = student_model_class.from_pretrained(args.student_pretrained_weights, config=A_ )
else:
_lowerCamelCase : Optional[Any] = student_model_class(A_ )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
_lowerCamelCase : int = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=A_ )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(A_, A_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(A_, A_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
_lowerCamelCase : Optional[int] = Distiller(
params=A_, dataset=A_, token_probs=A_, student=A_, teacher=A_ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
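# Example invocation (every path, file name, and model name below is a placeholder):
#   python train.py --dump_path serialization_dir/my_run --data_file data/binarized.pickle \
#       --student_type distilbert --student_config training_configs/distilbert.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_mlm 0.5 --alpha_ce 0.5 --alpha_clm 0.0 --token_counts data/token_counts.pickle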
| 175
| 0
|
'''simple docstring'''
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    def __init__(self) -> None:
        # One FIFO queue per priority level; index 0 is the highest priority.
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        # Serve the highest-priority non-empty queue first, FIFO within a level.
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    def __init__(self) -> None:
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        # The smallest element is treated as the highest priority.
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self) -> str:
        return str(self.queue)
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 237
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
__lowerCAmelCase : Optional[Any] ={
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class UpperCAmelCase ( UpperCamelCase__ ):
__lowercase = """facebook/nllb-200-distilled-600M"""
__lowercase = (
"""This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """
"""be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """
"""which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in """
"""plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."""
)
__lowercase = """translator"""
__lowercase = AutoTokenizer
__lowercase = AutoModelForSeqaSeqLM
__lowercase = LANGUAGE_CODES
__lowercase = ["""text""", """text""", """text"""]
__lowercase = ["""text"""]
def UpperCAmelCase_ ( self :List[Any] , lowercase_ :Any , lowercase_ :List[str] , lowercase_ :int )-> str:
if src_lang not in self.lang_to_code:
raise ValueError(F"{src_lang} is not a supported language." )
if tgt_lang not in self.lang_to_code:
raise ValueError(F"{tgt_lang} is not a supported language." )
A__ = self.lang_to_code[src_lang]
A__ = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
lowercase_ , return_tensors="pt" , src_lang=lowercase_ , tgt_lang=lowercase_ )
def UpperCAmelCase_ ( self :Dict , lowercase_ :Any )-> int:
return self.model.generate(**lowercase_ )
def UpperCAmelCase_ ( self :int , lowercase_ :Optional[Any] )-> str:
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowercase_ )
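# Hypothetical usage sketch (the tool name and the surrounding agent plumbing are
# assumed; only the three-step encode / forward / decode flow mirrors the class above):
#   translator = TranslationTool()
#   inputs = translator.encode("How are you?", src_lang="English", tgt_lang="French")
#   outputs = translator.forward(inputs)
#   print(translator.decode(outputs))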
| 237
| 1
|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __a :
def __init__( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : List[str]=13 , UpperCAmelCase : str=32 , UpperCAmelCase : str=3 , UpperCAmelCase : int=4 , UpperCAmelCase : Optional[Any]=[10, 20, 30, 40] , UpperCAmelCase : Any=[2, 2, 3, 2] , UpperCAmelCase : Tuple=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Tuple=37 , UpperCAmelCase : Union[str, Any]="gelu" , UpperCAmelCase : Optional[Any]=10 , UpperCAmelCase : Tuple=0.02 , UpperCAmelCase : List[str]=["stage2", "stage3", "stage4"] , UpperCAmelCase : Union[str, Any]=[2, 3, 4] , UpperCAmelCase : Dict=None , ):
lowerCAmelCase_ : str = parent
lowerCAmelCase_ : Dict = batch_size
lowerCAmelCase_ : Optional[Any] = image_size
lowerCAmelCase_ : str = num_channels
lowerCAmelCase_ : int = num_stages
lowerCAmelCase_ : int = hidden_sizes
lowerCAmelCase_ : Dict = depths
lowerCAmelCase_ : List[str] = is_training
lowerCAmelCase_ : int = use_labels
lowerCAmelCase_ : int = intermediate_size
lowerCAmelCase_ : Union[str, Any] = hidden_act
lowerCAmelCase_ : Tuple = num_labels
lowerCAmelCase_ : Dict = initializer_range
lowerCAmelCase_ : str = out_features
lowerCAmelCase_ : Dict = out_indices
lowerCAmelCase_ : Optional[int] = scope
def A ( self : str ):
lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : Optional[Any] = None
if self.use_labels:
lowerCAmelCase_ : int = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase_ : List[Any] = self.get_config()
return config, pixel_values, labels
def A ( self : List[str] ):
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def A ( self : str , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] ):
lowerCAmelCase_ : str = ConvNextModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : Tuple = model(UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A ( self : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Tuple ):
lowerCAmelCase_ : Dict = ConvNextForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : Optional[int] = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : int , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : List[str] ):
lowerCAmelCase_ : Any = ConvNextBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : Optional[int] = model(UpperCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCAmelCase_ : List[str] = None
lowerCAmelCase_ : Dict = ConvNextBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : Optional[int] = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A ( self : Tuple ):
lowerCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
lowerCAmelCase_ : int = config_and_inputs
lowerCAmelCase_ : Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
__snake_case : int = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
__snake_case : Any = (
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
__snake_case : Optional[Any] = True
__snake_case : Optional[int] = False
__snake_case : Dict = False
__snake_case : List[Any] = False
__snake_case : Dict = False
def A ( self : str ):
lowerCAmelCase_ : List[str] = ConvNextModelTester(self )
lowerCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 )
def A ( self : str ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : List[str] ):
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def A ( self : Union[str, Any] ):
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def A ( self : Optional[int] ):
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def A ( self : List[Any] ):
pass
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Tuple = model_class(UpperCAmelCase )
lowerCAmelCase_ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : int = [*signature.parameters.keys()]
lowerCAmelCase_ : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A ( self : Any ):
lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A ( self : str ):
lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase )
def A ( self : List[Any] ):
def check_hidden_states_output(UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict ):
lowerCAmelCase_ : Dict = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase_ : Optional[Any] = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowerCAmelCase_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Union[str, Any] = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ : Tuple = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A ( self : Any ):
lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@slow
def A ( self : Union[str, Any] ):
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Optional[Any] = ConvNextModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def __UpperCamelCase ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __a ( unittest.TestCase ):
@cached_property
def A ( self : Optional[Any] ):
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Any = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(UpperCAmelCase )
lowerCAmelCase_ : str = self.default_image_processor
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : Dict = image_processor(images=UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase_ : Optional[Any] = model(**UpperCAmelCase )
# verify the logits
lowerCAmelCase_ : Optional[Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowerCAmelCase_ : Tuple = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
@require_torch
class __a ( unittest.TestCase ,__UpperCamelCase ):
__snake_case : Any = (ConvNextBackbone,) if is_torch_available() else ()
__snake_case : Any = ConvNextConfig
__snake_case : Union[str, Any] = False
def A ( self : int ):
lowerCAmelCase_ : List[Any] = ConvNextModelTester(self )
| 361
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            """The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use GLPNImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 28
| 0
|
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
lowercase : Tuple = {"""UserAgent""": UserAgent().random}
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> dict:
lowercase : Any = script.contents[0]
lowercase : Union[str, Any] = json.loads(data[data.find("""{\"config\"""" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __snake_case :
def __init__( self ,snake_case ):
'''simple docstring'''
lowercase : List[Any] = f"https://www.instagram.com/{username}/"
lowercase : str = self.get_json()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Tuple = requests.get(self.url ,headers=snake_case ).text
lowercase : Dict = BeautifulSoup(snake_case ,"""html.parser""" ).find_all("""script""" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
'''simple docstring'''
return f"{self.__class__.__name__}('{self.username}')"
def __str__( self ):
'''simple docstring'''
return f"{self.fullname} ({self.username}) is {self.biography}"
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return self.user_data["username"]
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return self.user_data["full_name"]
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return self.user_data["biography"]
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return self.user_data["business_email"]
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return self.user_data["external_url"]
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return self.user_data["is_verified"]
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return self.user_data["is_private"]
def _snake_case( SCREAMING_SNAKE_CASE__ = "github" ) -> None:
import os
if os.environ.get("""CI""" ):
return # test failing on GitHub Actions
lowercase : Tuple = InstagramUser(SCREAMING_SNAKE_CASE__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , SCREAMING_SNAKE_CASE__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase : List[str] = InstagramUser("""github""")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 20
|
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) -> Any:
lowercase : Dict = {
"""7z""": (seven_zip_file, SevenZipExtractor),
"""bz2""": (bza_file, BzipaExtractor),
"""gzip""": (gz_file, GzipExtractor),
"""lz4""": (lza_file, LzaExtractor),
"""tar""": (tar_file, TarExtractor),
"""xz""": (xz_file, XzExtractor),
"""zip""": (zip_file, ZipExtractor),
"""zstd""": (zstd_file, ZstdExtractor),
}
lowercase , lowercase : Optional[Any] = input_paths_and_base_extractors[compression_format]
if input_path is None:
lowercase : Dict = f"for '{compression_format}' compression_format, "
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(SCREAMING_SNAKE_CASE__ )
assert base_extractor.is_extractable(SCREAMING_SNAKE_CASE__ )
lowercase : Any = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
base_extractor.extract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowercase : str = file_path.read_text(encoding="""utf-8""" )
else:
lowercase : Optional[Any] = output_path.read_text(encoding="""utf-8""" )
lowercase : Tuple = text_file.read_text(encoding="""utf-8""" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) -> Dict:
lowercase : str = {
"""7z""": seven_zip_file,
"""bz2""": bza_file,
"""gzip""": gz_file,
"""lz4""": lza_file,
"""tar""": tar_file,
"""xz""": xz_file,
"""zip""": zip_file,
"""zstd""": zstd_file,
}
lowercase : Optional[Any] = input_paths[compression_format]
if input_path is None:
lowercase : int = f"for '{compression_format}' compression_format, "
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = Extractor.infer_extractor_format(SCREAMING_SNAKE_CASE__ )
assert extractor_format is not None
lowercase : Any = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
Extractor.extract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowercase : Dict = file_path.read_text(encoding="""utf-8""" )
else:
lowercase : int = output_path.read_text(encoding="""utf-8""" )
lowercase : Optional[Any] = text_file.read_text(encoding="""utf-8""" )
assert extracted_file_content == expected_file_content
@pytest.fixture
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict:
import tarfile
lowercase : Tuple = tmp_path / """data_dot_dot"""
directory.mkdir()
lowercase : str = directory / """tar_file_with_dot_dot.tar"""
with tarfile.TarFile(SCREAMING_SNAKE_CASE__ , """w""" ) as f:
f.add(SCREAMING_SNAKE_CASE__ , arcname=os.path.join("""..""" , text_file.name ) )
return path
@pytest.fixture
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> List[str]:
import tarfile
lowercase : Tuple = tmp_path / """data_sym_link"""
directory.mkdir()
lowercase : int = directory / """tar_file_with_sym_link.tar"""
os.symlink("""..""" , directory / """subdir""" , target_is_directory=SCREAMING_SNAKE_CASE__ )
with tarfile.TarFile(SCREAMING_SNAKE_CASE__ , """w""" ) as f:
f.add(str(directory / """subdir""" ) , arcname="""subdir""" ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"""insecure_tar_file, error_log""" , [("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] , )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
lowercase : List[Any] = {
"""tar_file_with_dot_dot""": tar_file_with_dot_dot,
"""tar_file_with_sym_link""": tar_file_with_sym_link,
}
lowercase : Optional[int] = insecure_tar_files[insecure_tar_file]
lowercase : List[str] = tmp_path / """extracted"""
TarExtractor.extract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
# We should have less false positives than zipfile.is_zipfile
# We do that by checking only the magic number
lowercase : Any = tmpdir / """not_a_zip_file"""
# From: https://github.com/python/cpython/pull/5053
lowercase : str = (
B"""\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"""
B"""\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"""
B"""DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"""
B"""\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"""
)
with not_a_zip_file.open("""wb""" ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
assert zipfile.is_zipfile(str(SCREAMING_SNAKE_CASE__ ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(SCREAMING_SNAKE_CASE__ ) # but we're right
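# A minimal sketch of the magic-number idea behind the test above (assumed
# behaviour; the real ZipExtractor does more than this): trust the leading
# local-file-header signature instead of zipfile.is_zipfile's permissive scan.
def _demo_is_zip(path) -> bool:
    magic = b"PK\x03\x04"
    with open(path, "rb") as f:
        return f.read(len(magic)) == magic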
| 20
| 1
|
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowerCAmelCase = RobertaTokenizer
_lowerCAmelCase = RobertaTokenizerFast
_lowerCAmelCase = True
_lowerCAmelCase = {"""cls_token""": """<s>"""}
def __UpperCAmelCase ( self ) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_a = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
_a = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
_a = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_a = {'unk_token': '<unk>'}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__magic_name__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__magic_name__ ) )
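    # For orientation (a sketch of the toy byte-level BPE built above): "\u0120" marks a
    # leading space, so with the merges "\u0120 l" -> "\u0120l", "\u0120l o" -> "\u0120lo",
    # "\u0120lo w" -> "\u0120low" and "e r" -> "er", the string " lower" tokenizes as
    # ["\u0120low", "er"], while "lower" (no leading space) stays ["l", "o", "w", "er"].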
def __UpperCAmelCase ( self , **__magic_name__ ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ )
def __UpperCAmelCase ( self , **__magic_name__ ) -> Any:
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ ) -> Optional[Any]:
_a = 'lower newer'
_a = 'lower newer'
return input_text, output_text
def __UpperCAmelCase ( self ) -> Optional[int]:
_a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
_a = 'lower newer'
_a = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
_a = tokenizer.tokenize(__magic_name__ ) # , add_prefix_space=True)
self.assertListEqual(__magic_name__ , __magic_name__ )
_a = tokens + [tokenizer.unk_token]
_a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def __UpperCAmelCase ( self ) -> List[str]:
_a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=__magic_name__ ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=__magic_name__ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
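    # A minimal sketch of the `lstrip` behaviour exercised above (token strings
    # are illustrative and require the `roberta-base` checkpoint to verify):
    #
    #     tok = RobertaTokenizer.from_pretrained("roberta-base")
    #     tok.add_special_tokens({"mask_token": AddedToken("<mask>", lstrip=True, rstrip=False)})
    #     tok.tokenize("Encode <mask> sequence")   # space before <mask> is absorbed by the mask token,
    #                                              # so "sequence" keeps its own "Ġ" (leading-space) form
    #     tok.tokenize("Encode <mask>sequence")    # no space: the following token has no "Ġ" prefix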
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while Python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
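# The combinations tested above reduce to one rule: trim_offsets=True excludes
# a token's leading space from its span, trim_offsets=False includes it. A
# plain-Python illustration of the expected second-token span for the input
# "hello hello" (5-letter token plus one separating space), independent of any
# tokenizer:


def expected_second_token_span(token_len: int, trim_offsets: bool) -> tuple:
    # the second token begins after "<token><space>"
    start = token_len + 1 if trim_offsets else token_len
    return (start, token_len + 1 + token_len)


assert expected_second_token_span(5, trim_offsets=True) == (6, 11)
assert expected_second_token_span(5, trim_offsets=False) == (5, 11)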
| 356
|
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    """
    Count the distinct ways to climb a staircase of `number_of_steps` steps,
    taking 1 or 2 steps at a time (the Fibonacci recurrence).

    >>> climb_stairs(3)
    3
    >>> climb_stairs(1)
    1
    >>> climb_stairs(-7)
    Traceback (most recent call last):
        ...
    AssertionError: number_of_steps needs to be positive integer, your input -7
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
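# The loop above is the O(n) Fibonacci iteration: ways(n) equals the (n+1)-th
# Fibonacci number. A hypothetical cross-check (not part of the original
# module) against a memoized recursive version:
import functools


@functools.lru_cache(maxsize=None)
def climb_stairs_recursive(n: int) -> int:
    # base cases: one way to stand still or take a single step
    return 1 if n <= 1 else climb_stairs_recursive(n - 1) + climb_stairs_recursive(n - 2)


assert all(climb_stairs(n) == climb_stairs_recursive(n) for n in range(1, 10))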
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104
| 0
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    """Returns the boolean value of an environment variable, falling back to `default` when unset."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    "Decorator that skips a test unconditionally"
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    """Decorator marking a test as slow. Slow tests are skipped unless the RUN_SLOW environment variable is set."""
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
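# A hypothetical usage sketch for the skip decorators above (the test class and
# its body are illustrative, not taken from the accelerate test suite):
class ExampleDecoratedTests(unittest.TestCase):
    @slow
    @require_cuda
    def test_big_model_on_gpu(self):
        # only runs when RUN_SLOW=yes is exported and a CUDA device is visible
        self.assertTrue(torch.cuda.is_available())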
class TempDirTestCase(unittest.TestCase):
    """
    A TestCase class that keeps a single `tempfile` directory open for the duration of the class, wipes its contents
    between tests (unless `clear_on_setup` is False), and deletes it on teardown.
    """

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        "Creates a temporary directory for the class."
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        "Removes the temporary directory after the tests have run."
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        "Destroys all contents in `self.tmpdir`, but not `self.tmpdir` itself."
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    """
    A TestCase class that resets the accelerator state singletons after every test, so state never leaks between
    tests.
    """

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    """
    A TestCase class for registering mocks that should be active in every test, via `add_mocks`.
    """

    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
        return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
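# Usage sketch for `run_command` (the command itself is illustrative).
# `execute_subprocess_async` is the choice when live stdout/stderr streaming is
# wanted, e.g. for distributed launchers that may run for minutes.
if __name__ == "__main__":
    out = run_command([sys.executable, "-c", "print('hello')"], return_stdout=True)
    assert out.strip() == "hello"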
| 333
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 333
| 1
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the TPU launcher command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
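# Illustrative invocation (the training script and its flags are hypothetical
# examples; everything after the script path is forwarded verbatim):
#
#     python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased --do_train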
| 357
|
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 241
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """
    Calculates the euclidean distance between two vectors.
    >>> euclidean(np.array([0]), np.array([1]))
    1.0
    """
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """
    For each vector in `value_array`, find the nearest vector in `dataset`
    (by euclidean distance) and return `[vector, distance]` pairs.
    """
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatypes... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            # keep the smallest distance seen so far
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """
    Calculates the cosine similarity between two vectors.
    >>> cosine_similarity(np.array([1]), np.array([1]))
    1.0
    """
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
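# A quick worked example (values verified by hand): the nearest neighbour of
# (0, 1) among {(0, 0), (1, 1), (2, 2)} is (0, 0) at distance 1.0 -- the tie
# with (1, 1) resolves to the earlier dataset entry.
_dataset = np.array([[0, 0], [1, 1], [2, 2]])
_value_array = np.array([[0, 1]])
assert similarity_search(_dataset, _value_array) == [[[0, 0], 1.0]]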
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28
|
from __future__ import annotations
def ceil_index(v: list[int], left: int, right: int, key: int) -> int:
    """Smallest index in v[left+1 .. right] whose value is >= key (binary search)."""
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """
    >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
    6
    >>> longest_increasing_subsequence_length([])
    0
    """
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling of v[i] in tail
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 175
| 0
|
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
__lowercase = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__lowercase = BASE_URL + """/user"""
# https://github.com/settings/tokens
__lowercase = os.environ.get("""USER_TOKEN""", """""")
def lowercase ( A_ )-> dict[Any, Any]:
'''simple docstring'''
a : str = {
"Authorization": F'''token {auth_token}''',
"Accept": "application/vnd.github.v3+json",
}
return requests.get(A_ , headers=A_ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
| 226
|
"""simple docstring"""
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Returns a visual representation of the node and all its following nodes."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    """Creates a Linked List from the elements of the given sequence
    (starting from index 0) and returns the head of the Linked List."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node):
    """Prints the elements of the given Linked List in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)
if __name__ == "__main__":
main()
| 226
| 1
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if `number` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return the sum of three fractions as a (numerator, denominator) pair in lowest form."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
| 36
|
'''simple docstring'''
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j
        for i in range(0, n):
            self.dp[i][i] = 0  # added for correctness: the distance from a node to itself is 0

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
_lowerCamelCase : List[str] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 28
| 0
|
"""simple docstring"""
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)
if __name__ == "__main__":
print(solution(int(input().strip())))
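# The mutually recursive chain above recounts overlapping subproblems; the
# standard bottom-up coin-counting sketch below gives the same result. 200p
# with UK coins has 73682 combinations (the well-known Project Euler 31
# answer).


def solution_dp(target: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * target  # ways[0] = 1: the empty combination
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]


assert solution_dp(200) == 73682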
| 40
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCAmelCase__ = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
UpperCAmelCase__ = {
'allenai/led-base-16384': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode() -> dict:
    """
    Returns a mapping from utf-8 bytes to unicode strings. We specifically avoid mapping to whitespace/control
    characters that the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word: tuple) -> set:
    """
    Return the set of adjacent symbol pairs in a word, where the word is a tuple of variable-length string symbols.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
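# A small verifiable property of `bytes_to_unicode`: the 0x20 space byte is not
# a "printable-safe" byte, so it is remapped past 0xFF -- it lands on U+0120
# ("Ġ"), the leading-space marker seen throughout byte-level BPE vocabularies:
assert bytes_to_unicode()[ord(" ")] == "\u0120"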
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
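# A hedged sketch of the `-1` padding convention for `global_attention_mask`
# (values here are hand-written, not produced by the class above): 0 marks
# local attention, 1 marks global attention, and padded positions get -1 so
# they can be told apart from "local".
example = {"input_ids": [5, 6, 7, 0, 0], "global_attention_mask": [1, 0, 0]}
padded = example["global_attention_mask"] + [-1] * (
    len(example["input_ids"]) - len(example["global_attention_mask"])
)
assert padded == [1, 0, 0, -1, -1]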
| 40
| 1
|
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 117
|
'''simple docstring'''
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """
    Verify a Spanish DNI: eight digits plus the correct check letter.

    >>> is_spain_national_id("12345678Z")
    True
    >>> is_spain_national_id("12345678z")  # case and hyphens are normalised
    True
    >>> is_spain_national_id("12345678x")
    False
    """
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104
| 0
|
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized version of a model using a pretrained config.
    Args:
        config_name: which config to use, e.g. "facebook/bart-base"
        save_dir: where to save the resulting model and tokenizer
        config_kwargs: config overrides, e.g. num_layers=2
    """
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
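# Illustrative CLI call via `fire` (the script filename, model name and
# override are hypothetical examples):
#
#     python save_randomly_initialized.py t5-small /tmp/t5-tiny-random --d_model=64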
| 364
|
import datasets
from .evaluate import evaluate
_CITATION = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
_DESCRIPTION = '''
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
_KWARGS_DESCRIPTION = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 36
| 0
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 290
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    """
    Constructs a processor that wraps a ViT image processor and a CLIP tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
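# A hedged usage sketch (the checkpoint name is the published CLIPSeg one, but
# loading it requires a download; the produced keys are indicative):
#
#     from transformers import CLIPSegProcessor
#     from PIL import Image
#
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     inputs = processor(text=["a cat"], images=Image.new("RGB", (352, 352)), return_tensors="pt")
#     # inputs -> input_ids, attention_mask, pixel_values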
| 241
| 0
|
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"],
        )
    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")
        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")
        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")
        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )
    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])
    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])
        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
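

# Standalone sketch of the greedy longest-match-first strategy that
# WordpieceTokenizer implements (exercised in the tests above); this helper is an
# illustration, not the library class.
def greedy_wordpiece(word, vocab, unk="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece  # continuation pieces carry the ## prefix
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]  # no piece matched: the whole word becomes [UNK]
        tokens.append(cur)
        start = end
    return tokens


# greedy_wordpiece("こんばんは", {"こん", "##ばんは"}) -> ["こん", "##ばんは"]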
| 368
|
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch(self, src_texts, tgt_texts=None, max_length=None, max_target_length=None, padding="longest", return_tensors=None, truncation=True, **kwargs) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs)
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(text_target=tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs)
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
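

# A minimal usage sketch for the composite tokenizer above (assumption, not from this
# file: the "facebook/rag-token-nq" checkpoint, which ships both sub-tokenizers).
if __name__ == "__main__":
    tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
    # __call__ dispatches to the question-encoder tokenizer by default
    inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
    print(inputs["input_ids"].shape)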
| 12
| 0
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
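

# The metric-prefixing step above reduces to the following (runnable sketch with
# invented metric values): keys without the prefix get renamed in place.
if __name__ == "__main__":
    metrics = {"exact_match": 81.2, "f1": 88.6, "eval_runtime": 12.3}
    metric_key_prefix = "eval"
    for key in list(metrics.keys()):
        if not key.startswith(f"{metric_key_prefix}_"):
            metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
    print(metrics)  # {'eval_runtime': 12.3, 'eval_exact_match': 81.2, 'eval_f1': 88.6}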
| 226
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
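

# Usage sketch for the processor above on a synthetic image (assumption: the class
# name restored as CLIPImageProcessor; any RGB array works as input).
if __name__ == "__main__":
    dummy = np.random.randint(0, 256, size=(256, 320, 3), dtype=np.uint8)
    processor = CLIPImageProcessor()
    # resize shortest edge to 224, center-crop to 224x224, rescale and normalize
    batch = processor(images=dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)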
| 226
| 1
|
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # the file's natural features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = str(tmp_path_factory.mktemp("data") / f"test.json.{extension}")
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
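

# A self-contained round-trip sketch of the writer/reader pair exercised above
# (uses the public Dataset API instead of the test fixtures; names are illustrative).
if __name__ == "__main__":
    import tempfile

    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
    with tempfile.TemporaryDirectory() as tmp:
        path = f"{tmp}/data.jsonl"
        JsonDatasetWriter(ds, path, lines=True).write()  # one JSON object per line
        reloaded = JsonDatasetReader(path, cache_dir=f"{tmp}/cache").read()
        assert reloaded.column_names == ds.column_names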
| 368
|
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (despite we don't have training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f'config.{attribute}' in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])
        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])
    return sorted(unused_attributes)
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
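
# Quick check of the multi-line `getattr(config, "...")` regex used above; the code
# snippet being searched is invented for illustration.
# >>> import re
# >>> snippet = 'x = getattr(\n    self.config,\n    "hidden_size",\n)'
# >>> bool(re.search(r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"', snippet))
# True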
| 97
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, tokenizer_file=None, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, **kwargs):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(vocab_file, tokenizer_file=tokenizer_file, pad_token=pad_token, eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, **kwargs)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
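

# Sketch of the `<unk_i>` filler logic above: with the default offset of 103 and no
# user-supplied extras, the else-branch reserves <mask_1> plus <unk_2>..<unk_102>.
if __name__ == "__main__":
    offset = 103
    extras = ["<mask_1>"] + [f"<unk_{i}>" for i in range(2, offset)]
    print(len(extras), extras[:3], extras[-1])  # 102 ['<mask_1>', '<unk_2>', '<unk_3>'] <unk_102>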
| 40
|
"""simple docstring"""
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary string representation (e.g. 9 -> "0b1001")."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
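
# Sanity check against Python's built-in bin() (a quick sketch):
# >>> decimal_to_binary(200) == bin(200)
# True
# >>> decimal_to_binary(-37) == bin(-37)
# True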
| 40
| 1
|
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index
    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs
def lowercase_ ( self , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = self.model(**__A )
__lowerCamelCase = model_inputs['input_ids']
return model_outputs
    def postprocess( self , model_outputs , top_k=5 , target_ids=None ) -> Any:
        '''simple docstring'''
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs['input_ids'][0]
        outputs = model_outputs['logits']
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits , axis=-1 )
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs , 0 ) , target_ids.reshape(-1 , 1 ) )
                probs = tf.expand_dims(probs , 0 )
            topk = tf.math.top_k(probs , k=top_k )
            values , predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1 )
            if target_ids is not None:
                probs = probs[..., target_ids]
            values , predictions = probs.topk(top_k )
        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            row = []
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens , skip_special_tokens=single_mask )
                proposition = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p] ), 'sequence': sequence}
                row.append(proposition )
            result.append(row )
        if single_mask:
            return result[0]
        return result
    def get_target_ids( self , targets , top_k=None ) -> Optional[int]:
        '''simple docstring'''
        if isinstance(targets , str ):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target , None )
            if id_ is None:
                input_ids = self.tokenizer(
                    target , add_special_tokens=False , return_attention_mask=False , return_token_type_ids=False , max_length=1 , truncation=True , )['input_ids']
                if len(input_ids ) == 0:
                    logger.warning(
                        f"""The specified target token `{target}` does not exist in the model vocabulary. """
                        'We cannot replace it with anything meaningful, ignoring it' )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"""The specified target token `{target}` does not exist in the model vocabulary. """
                    f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
            target_ids.append(id_ )
        target_ids = list(set(target_ids ) )
        if len(target_ids ) == 0:
            raise ValueError('At least one target must be provided when passed.' )
        target_ids = np.array(target_ids )
        return target_ids
    def _sanitize_parameters( self , top_k=None , targets=None ) -> List[Any]:
        '''simple docstring'''
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets , top_k )
            postprocess_params['target_ids'] = target_ids
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                'fill-mask' , self.model.base_model_prefix , 'The tokenizer does not define a `mask_token`.' )
        return {}, {}, postprocess_params
    def __call__( self , inputs , *args , **kwargs ) -> Tuple:
        '''simple docstring'''
        outputs = super().__call__(inputs , **kwargs )
        if isinstance(inputs , list ) and len(outputs ) == 1:
            return outputs[0]
        return outputs
| 359
|
import requests
__A = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> None:
"""simple docstring"""
__lowerCamelCase = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 348
| 0
|
'''simple docstring'''
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("""DataClass""", Any)
DataClassType = NewType("""DataClassType""", Any)
def string_to_bool ( v : str ) ->bool:
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' )
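# e.g. string_to_bool("YES") -> True and string_to_bool("0") -> False; real booleans pass through.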
def make_choice_type_function ( choices : list ) ->Callable[[str], Any]:
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg : str_to_choice.get(arg , arg )
def HfArg ( *,
    aliases : Union[str, List[str]] = None , help : str = None , default : Any = dataclasses.MISSING , default_factory : Callable[[], Any] = dataclasses.MISSING , metadata : dict = None , **kwargs : Optional[Any] , ) ->dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
class a_ ( ArgumentParser ):
    '''simple docstring'''
    dataclass_types: Iterable[DataClassType]
    def __init__( self , dataclass_types , **kwargs ) -> None:
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )
    @staticmethod
    def _parse_dataclass_field( parser , field ) -> None:
        field_name = f'--{field.name}'
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , str ):
            raise RuntimeError(
                """Unresolved type detected, which should have been done with the help of """
                """`typing.get_type_hints` method by default""" )
        aliases = kwargs.pop("""aliases""" , [] )
        if isinstance(aliases , str ):
            aliases = [aliases]
        origin_type = getattr(field.type , """__origin__""" , field.type )
        if origin_type is Union or (hasattr(types , """UnionType""" ) and isinstance(field.type , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
            ):
                raise ValueError(
                    """Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"""
                    """ the argument parser only supports one type per argument."""
                    f' Problem encountered in field \'{field.name}\'.' )
            if type(None ) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type , """__origin__""" , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None , field.type.__args__[1] ) else field.type.__args__[1]
                )
                origin_type = getattr(field.type , """__origin__""" , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type , type ) and issubclass(field.type , Enum )):
            if origin_type is Literal:
                kwargs["""choices"""] = field.type.__args__
            else:
                kwargs["""choices"""] = [x.value for x in field.type]
            kwargs["""type"""] = make_choice_type_function(kwargs["""choices"""] )
            if field.default is not dataclasses.MISSING:
                kwargs["""default"""] = field.default
            else:
                kwargs["""required"""] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs )
            # Hack because type=bool in argparse does not behave as we want.
            kwargs["""type"""] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["""default"""] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["""nargs"""] = """?"""
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["""const"""] = True
        elif isclass(origin_type ) and issubclass(origin_type , list ):
            kwargs["""type"""] = field.type.__args__[0]
            kwargs["""nargs"""] = """+"""
            if field.default_factory is not dataclasses.MISSING:
                kwargs["""default"""] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["""required"""] = True
        else:
            kwargs["""type"""] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["""default"""] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["""default"""] = field.default_factory()
            else:
                kwargs["""required"""] = True
        parser.add_argument(field_name , *aliases , **kwargs )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["""default"""] = False
            parser.add_argument(f'--no_{field.name}' , action="""store_false""" , dest=field.name , **bool_kwargs )
    def _add_dataclass_arguments( self , dtype ) -> None:
        if hasattr(dtype , """_argument_group_name""" ):
            parser = self.add_argument_group(dtype._argument_group_name )
        else:
            parser = self
        try:
            type_hints = get_type_hints(dtype )
        except NameError:
            raise RuntimeError(
                f'Type resolution failed for {dtype}. Try declaring the class in global scope or '
                """removing line of `from __future__ import annotations` which opts in Postponed """
                """Evaluation of Annotations (PEP 563)""" )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex ):
                python_version = """.""".join(map(str , sys.version_info[:3] ) )
                raise RuntimeError(
                    f'Type resolution failed for {dtype} on Python {python_version}. Try removing '
                    """line of `from __future__ import annotations` which opts in union types as """
                    """`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To """
                    """support Python versions that lower than 3.10, you need to use """
                    """`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of """
                    """`X | None`.""" ) from ex
            raise
        for field in dataclasses.fields(dtype ):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser , field )
    def parse_args_into_dataclasses( self , args=None , return_remaining_strings=False , look_for_args_file=True , args_filename=None , args_file_flag=None , ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix(""".args""" ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag , type=str , action="""append""" )
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg , args = args_file_parser.parse_known_args(args=args )
                cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip("""-""" ) , None )
                if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace , remaining_args = self.parse_known_args(args=args )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace , k )
            obj = dtype(**inputs )
            outputs.append(obj )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(namespace )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' )
            return (*outputs,)
    def parse_dict( self , args , allow_extra_keys = False ) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(f'Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}' )
        return tuple(outputs )
    def parse_json_file( self , json_file , allow_extra_keys = False ) -> Tuple[DataClass, ...]:
        with open(Path(json_file ) , encoding="""utf-8""" ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
    def parse_yaml_file( self , yaml_file , allow_extra_keys = False ) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
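# Usage sketch (hypothetical dataclass; `a_` is the parser class defined above):
#   @dataclasses.dataclass
#   class TrainArgs:
#       lr: float = 1e-4
#       fp16: bool = False
#   parser = a_(TrainArgs)
#   (train_args,) = parser.parse_args_into_dataclasses(args=["--lr", "3e-5", "--fp16"])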
| 58
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester :
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs( self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config( self):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
    def create_and_check_model( self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, ):
        '''simple docstring'''
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification( self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering( self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, ):
        '''simple docstring'''
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common( self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        '''simple docstring'''
        return True
    def setUp( self):
        '''simple docstring'''
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config( self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_token_classification( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    @slow
    def test_model_from_pretrained( self):
        '''simple docstring'''
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class UpperCAmelCase_ ( unittest.TestCase):
    def test_inference_no_head( self):
        '''simple docstring'''
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]], device=torch_device, )
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1E-3))
| 36
| 0
|
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def rename_keys ( s_dict ) -> Tuple:
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys() )
    for key in keys:
        layer_to_block_of_layer = R""".*/layers_(\d+)"""
        new_key = key
        if re.match(layer_to_block_of_layer , key ):
            new_key = re.sub(R"""layers_(\d+)""" , R"""block/\1/layer""" , new_key )
        layer_to_block_of_layer = R"""(encoder|decoder)\/"""
        if re.match(layer_to_block_of_layer , key ):
            groups = re.match(layer_to_block_of_layer , new_key ).groups()
            if groups[0] == "encoder":
                new_key = re.sub(R"""/mlp/""" , R"""/1/mlp/""" , new_key )
                new_key = re.sub(R"""/pre_mlp_layer_norm/""" , R"""/1/layer_norm/""" , new_key )
            elif groups[0] == "decoder":
                new_key = re.sub(R"""/mlp/""" , R"""/2/mlp/""" , new_key )
                new_key = re.sub(R"""/pre_mlp_layer_norm/""" , R"""/2/layer_norm/""" , new_key )
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key , temp_key )
        print(F'{key} -> {new_key}' )
        s_dict[new_key] = s_dict.pop(key )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCamelCase = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCamelCase = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
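    # By this point a flax key like "encoder/layers_3/mlp/wi/kernel" has been renamed to
    # "encoder/block/3/layer/1/mlp/wi/kernel" (decoder MLP sub-layers land at index 2,
    # after the self- and cross-attention sub-layers).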
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys() ):
        if "expert" in key:
            num_experts_in_layer = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts_in_layer ):
                # split the stacked expert tensor into one entry per expert
                s_dict[key.replace("""expert/""" , F'experts/expert_{idx}/' )] = expert_weights[idx]
                print(F'{key} -> {key.replace("expert/" , "experts/expert_" + str(idx ) + "/" )}' )
            s_dict.pop(key )
    return s_dict
GIN_TO_CONFIG_MAPPING = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def convert_gin_to_config ( gin_file , num_experts ) -> Union[str, Any]:
    # Convert a google style config to the hugging face format
    import regex as re
    with open(gin_file , """r""" ) as f:
        raw_gin = f.read()
    regex_match = re.findall(R"""(.*) = ([0-9.]*)""" , raw_gin )
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if """.""" in value else int(value )
    activation = re.findall(R"""(.*activations) = \(\'(.*)\',\)""" , raw_gin )[0]
    args[GIN_TO_CONFIG_MAPPING["""dense.MlpBlock.activations"""]] = str(activation[1] )
    args["""num_experts"""] = num_experts
    config = SwitchTransformersConfig(**args )
    return config
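# Example: a gin line such as "NUM_HEADS = 12" is matched by the regex above and stored
# as the HF config key "num_heads" via GIN_TO_CONFIG_MAPPING.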
def convert_flax_checkpoint_to_pytorch ( flax_checkpoint_path , config_name , gin_file=None , pytorch_dump_path="./" , num_experts=8 ) -> Any:
    # Initialise PyTorch model
    print(F'Loading flax weights from : {flax_checkpoint_path}' )
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path )
    if gin_file is not None:
        config = convert_gin_to_config(gin_file , num_experts )
    else:
        config = SwitchTransformersConfig.from_pretrained(config_name )
    pt_model = SwitchTransformersForConditionalGeneration(config )
    flax_params = flax_params["""target"""]
    flax_params = flatten_dict(flax_params , sep="""/""" )
    flax_params = rename_keys(flax_params )
    flax_params = unflatten_dict(flax_params , sep="""/""" )
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model , flax_params )
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    pt_model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
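# CLI sketch (paths hypothetical):
#   python convert_switch_transformers_original_flax_checkpoint_to_pytorch.py \
#       --switch_t5x_checkpoint_path /tmp/t5x_ckpt --gin_file /tmp/model.gin \
#       --pytorch_dump_folder_path /tmp/switch_pt --num_experts 8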
| 355
|
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = 0
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(_a , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase = Path(_a ) / """preprocessor_config.json"""
lowerCamelCase = Path(_a ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(_a , """w""" ) )
lowerCamelCase = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase = Path(_a ) / """preprocessor_config.json"""
lowerCamelCase = Path(_a ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(_a , """w""" ) )
lowerCamelCase = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase = CLIPConfig()
            # Create a dummy config file with image_processor_type
lowerCamelCase = Path(_a ) / """preprocessor_config.json"""
lowerCamelCase = Path(_a ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(_a , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowerCamelCase = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop("""image_processor_type""" )
lowerCamelCase = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
lowerCamelCase = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
lowerCamelCase = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(_a , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase = Path(_a ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , )
lowerCamelCase = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_a , """clip-base is not a local folder and is not a valid model identifier""" ):
lowerCamelCase = AutoImageProcessor.from_pretrained("""clip-base""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_a , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCamelCase = AutoImageProcessor.from_pretrained(_a , revision="""aaaaaa""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_a , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
lowerCamelCase = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
lowerCamelCase = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
lowerCamelCase = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a )
lowerCamelCase = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
lowerCamelCase = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
try:
AutoConfig.register("""custom""" , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase = Path(_a ) / """preprocessor_config.json"""
lowerCamelCase = Path(_a ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(_a , """w""" ) )
lowerCamelCase = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
lowerCamelCase = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowerCAmelCase ( self ):
"""simple docstring"""
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = True
try:
AutoConfig.register("""custom""" , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use local
lowerCamelCase = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowerCamelCase = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowerCamelCase = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(_a , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 168
| 0
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION ='''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION ='''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION ='''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
    def _info( self ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute( self , predictions , references , min_len = 1 , max_len = 4 , ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
| 19
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__( BaseImageProcessor):
    model_input_names = ['pixel_values']
    def __init__( self: Any , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 2_55 , do_pad: bool = True , pad_size: int = 8 , **kwargs: Tuple , ):
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale( self: List[str] , image: np.ndarray , scale: float , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs: Tuple ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def pad( self: Union[str, Any] , image: np.ndarray , size: int , data_format: Optional[Union[str, ChannelDimension]] = None ):
        old_height, old_width = get_image_size(image )
        # Pad bottom/right so both spatial dims become multiples of `size`; note this adds a
        # full extra block even when a dim is already an exact multiple of `size`.
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=data_format )
    def preprocess( self: str , images: ImageInput , do_rescale: Optional[bool] = None , rescale_factor: Optional[float] = None , do_pad: Optional[bool] = None , pad_size: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs: Any , ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
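# Minimal usage sketch (assuming a PIL image `img` is available):
#   processor = lowerCamelCase__(do_rescale=True, do_pad=True, pad_size=8)
#   batch = processor.preprocess(img, return_tensors="np")
#   batch["pixel_values"][0].shape  # channels-first, H and W padded to multiples of 8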
| 12
| 0
|
def UpperCAmelCase_ ( a , b ):
    '''simple docstring'''
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int('''1''' in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
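# e.g. UpperCAmelCase_(25, 32) -> "0b111001"  (011001 | 100000, zero-padded to equal width)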
if __name__ == "__main__":
import doctest
doctest.testmod()
| 352
|
import comet # From: unbabel-comet
import torch
import datasets
_SCREAMING_SNAKE_CASE : List[str] = datasets.logging.get_logger(__name__)
_CITATION : Any = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_DESCRIPTION : Optional[Any] = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_KWARGS_DESCRIPTION : str = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
    def _info( self : List[Any] ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence''' ),
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
            '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf''',
] , )
    def _download_and_prepare( self : List[Any] , dl_manager : Dict ) -> Tuple:
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
    def _compute( self : int , sources : List[str] , predictions : Union[str, Any] , references : Union[str, Any] , gpus : Dict=None , progress_bar : Optional[int]=False ) -> str:
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'''src''': sources, '''mt''': predictions, '''ref''': references}
        data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
        mean_score , scores = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
| 218
| 0
|
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def SCREAMING_SNAKE_CASE_(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)
        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 276
|
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    """Input stream that builds a dataset from a Python generator."""

    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build streaming dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
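
# --- Added usage sketch (hedged): the same flow through the public API.
# `Dataset.from_generator` (available in datasets>=2.4) delegates to this
# input stream.
if __name__ == "__main__":
    from datasets import Dataset

    def squares():
        for i in range(3):
            yield {"n": i, "square": i * i}

    ds = Dataset.from_generator(squares)
    print(ds[2])  # {'n': 2, 'square': 4}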
| 97
| 0
|
"""simple docstring"""
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open('w').write('\n'.join(new))
if __name__ == "__main__":
fire.Fire(minify)
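# --- Added usage note (hedged): `fire` turns `minify` into a CLI, so with
# placeholder paths an invocation looks like
#   python minify.py --src_dir=./articles --dest_dir=./articles_head --n=100
# which writes the first 100 lines of every file in ./articles to ./articles_head.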
| 205
|
"""simple docstring"""
import numpy as np
def power_iteration(
    input_matrix,
    vector,
    error_tol=1e-12,
    max_iterations=100,
):
    """Return the largest-magnitude eigenvalue of input_matrix and its eigenvector."""
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector


def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
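    # --- Added usage sketch (hedged): dominant eigenpair of a small symmetric
    # matrix; the eigenvalues of [[2, 1], [1, 2]] are 1 and 3.
    value, vec = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))
    print(round(float(value), 6), np.round(np.abs(vec), 3))  # 3.0 [0.707 0.707]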
| 205
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class __UpperCamelCase ( unittest.TestCase ):
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =TFAutoModel.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =AutoModel.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =TFAutoModelForPreTraining.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =AutoModelForPreTraining.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_ =TFAutoModelForCausalLM.from_pretrained(
lowerCAmelCase, output_loading_info=lowerCAmelCase, from_pt=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =AutoModelForCausalLM.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_ =AutoModelForCausalLM.from_pretrained(
lowerCAmelCase, output_loading_info=lowerCAmelCase, from_tf=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =AutoModelWithLMHead.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_ =TFAutoModelForMaskedLM.from_pretrained(
lowerCAmelCase, output_loading_info=lowerCAmelCase, from_pt=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =AutoModelForMaskedLM.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_ =AutoModelForMaskedLM.from_pretrained(
lowerCAmelCase, output_loading_info=lowerCAmelCase, from_tf=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_ =TFAutoModelForSeqaSeqLM.from_pretrained(
lowerCAmelCase, output_loading_info=lowerCAmelCase, from_pt=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_ =AutoModelForSeqaSeqLM.from_pretrained(
lowerCAmelCase, output_loading_info=lowerCAmelCase, from_tf=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =TFAutoModelForSequenceClassification.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =TFAutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =AutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
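
# --- Added usage sketch (hedged): the PT<->TF weight crossover these tests
# exercise, assuming both torch and tensorflow are installed (checkpoints are
# downloaded on first use).
#   tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)
#   pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)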
| 75
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_bloom_fast'''] = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bloom'''] = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
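# --- Added usage note (hedged): with `_import_structure` wired up as above,
#   from transformers.models.bloom import BloomConfig, BloomModel
# resolves lazily through `_LazyModule`, so heavy backends such as torch are
# only imported when a model class is actually accessed.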
| 348
| 0
|
import math
import qiskit
def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
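# --- Added usage note (hedged): the returned counts map the measured bits
# (carry_out, sum) to shot frequencies; for quantum_full_adder(1, 1, 1) all
# 1000 shots should read "11", i.e. 1 + 1 + 1 = 0b11.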
| 293
|
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    """Config tester that checks MobileViT-specific config attributes."""

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
'''simple docstring'''
def __init__( self : int , A : str , A : Dict=13 , A : str=32 , A : Any=2 , A : Optional[Any]=3 , A : str=640 , A : Tuple=4 , A : Dict="silu" , A : List[Any]=3 , A : Any=32 , A : Any=0.1 , A : int=0.1 , A : Dict=0.1 , A : Optional[Any]=0.02 , A : List[Any]=True , A : Tuple=True , A : Any=10 , A : Optional[int]=None , ):
__snake_case: List[Any] = parent
__snake_case: Dict = batch_size
__snake_case: int = image_size
__snake_case: Tuple = patch_size
__snake_case: Tuple = num_channels
__snake_case: str = last_hidden_size
__snake_case: Dict = num_attention_heads
__snake_case: Dict = hidden_act
__snake_case: Tuple = conv_kernel_size
__snake_case: List[str] = output_stride
__snake_case: List[str] = hidden_dropout_prob
__snake_case: Optional[Any] = attention_probs_dropout_prob
__snake_case: int = classifier_dropout_prob
__snake_case: List[Any] = use_labels
__snake_case: Union[str, Any] = is_training
__snake_case: Union[str, Any] = num_labels
__snake_case: str = initializer_range
__snake_case: List[Any] = scope
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case: Tuple = None
__snake_case: Any = None
if self.use_labels:
__snake_case: Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case: str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case: Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase__ ( self : int ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : str , A : Optional[Any] , A : Any , A : Any , A : Union[str, Any] ):
__snake_case: List[Any] = MobileViTModel(config=A )
model.to(A )
model.eval()
__snake_case: int = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase__ ( self : str , A : List[Any] , A : Any , A : Any , A : int ):
__snake_case: str = self.num_labels
__snake_case: Optional[int] = MobileViTForImageClassification(A )
model.to(A )
model.eval()
__snake_case: Union[str, Any] = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[int] , A : str , A : Optional[Any] , A : int , A : str ):
__snake_case: List[Any] = self.num_labels
__snake_case: Dict = MobileViTForSemanticSegmentation(A )
model.to(A )
model.eval()
__snake_case: Union[str, Any] = model(A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case: Tuple = model(A , labels=A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Tuple = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case: Any = config_and_inputs
__snake_case: Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
lowerCAmelCase__ = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
"""feature-extraction""": MobileViTModel,
"""image-classification""": MobileViTForImageClassification,
"""image-segmentation""": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
def UpperCAmelCase__ ( self : str ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViT does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : List[Any] ):
pass
@unittest.skip(reason="""MobileViT does not support input and output embeddings""" )
def UpperCAmelCase__ ( self : Dict ):
pass
@unittest.skip(reason="""MobileViT does not output attentions""" )
def UpperCAmelCase__ ( self : Optional[Any] ):
pass
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case: Optional[Any] = model_class(A )
__snake_case: int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case: Optional[int] = [*signature.parameters.keys()]
__snake_case: List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase__ ( self : Optional[int] ):
pass
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase__ ( self : Dict ):
def check_hidden_states_output(A : List[Any] , A : int , A : Tuple ):
__snake_case: List[str] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
__snake_case: str = model(**self._prepare_for_class(A , A ) )
__snake_case: Optional[int] = outputs.hidden_states
__snake_case: Any = 5
self.assertEqual(len(A ) , A )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__snake_case: Union[str, Any] = 2
for i in range(len(A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__snake_case , __snake_case: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case: Optional[Any] = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case: Dict = True
check_hidden_states_output(A , A , A )
def UpperCAmelCase__ ( self : int ):
__snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case: List[Any] = MobileViTModel.from_pretrained(A )
self.assertIsNotNone(A )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def UpperCAmelCase__ ( self : Dict ):
return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Tuple = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(A )
__snake_case: str = self.default_image_processor
__snake_case: Optional[Any] = prepare_img()
__snake_case: List[Any] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: Dict = model(**A )
# verify the logits
__snake_case: List[str] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , A )
__snake_case: Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[str] = model.to(A )
__snake_case: Dict = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[Any] = prepare_img()
__snake_case: List[str] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: List[Any] = model(**A )
__snake_case: Optional[int] = outputs.logits
# verify the logits
__snake_case: Dict = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , A )
__snake_case: Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=A , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Dict ):
__snake_case: int = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: str = model.to(A )
__snake_case: Optional[Any] = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[str] = prepare_img()
__snake_case: Optional[int] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: Dict = model(**A )
__snake_case: List[Any] = outputs.logits.detach().cpu()
__snake_case: List[str] = image_processor.post_process_semantic_segmentation(outputs=A , target_sizes=[(50, 60)] )
__snake_case: str = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , A )
__snake_case: int = image_processor.post_process_semantic_segmentation(outputs=A )
__snake_case: Tuple = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , A )
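
# --- Added usage sketch (hedged): single-image classification with the same
# checkpoint the integration tests above use (downloads weights on first run).
#   processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small")
#   model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small")
#   with torch.no_grad():
#       logits = model(**processor(images=prepare_img(), return_tensors="pt")).logits
#   print(model.config.id2label[int(logits.argmax(-1))])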
| 293
| 1
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str]=13 , lowerCAmelCase__ : str=30 , lowerCAmelCase__ : str=2 , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Optional[Any]=32 , lowerCAmelCase__ : List[str]=5 , lowerCAmelCase__ : Optional[int]=4 , lowerCAmelCase__ : List[str]=37 , lowerCAmelCase__ : Tuple="gelu" , lowerCAmelCase__ : List[Any]=0.1 , lowerCAmelCase__ : Optional[int]=0.1 , lowerCAmelCase__ : str=10 , lowerCAmelCase__ : Any=0.02 , lowerCAmelCase__ : str=3 , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : Tuple=2 , ):
SCREAMING_SNAKE_CASE_: List[str] = parent
SCREAMING_SNAKE_CASE_: str = batch_size
SCREAMING_SNAKE_CASE_: Dict = image_size
SCREAMING_SNAKE_CASE_: Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE_: Optional[Any] = num_channels
SCREAMING_SNAKE_CASE_: Union[str, Any] = is_training
SCREAMING_SNAKE_CASE_: int = use_labels
SCREAMING_SNAKE_CASE_: Dict = hidden_size
SCREAMING_SNAKE_CASE_: List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE_: Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_: str = intermediate_size
SCREAMING_SNAKE_CASE_: int = hidden_act
SCREAMING_SNAKE_CASE_: int = hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: Tuple = type_sequence_label_size
SCREAMING_SNAKE_CASE_: List[Any] = initializer_range
SCREAMING_SNAKE_CASE_: str = scope
SCREAMING_SNAKE_CASE_: Union[str, Any] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
SCREAMING_SNAKE_CASE_: int = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE_: Dict = num_patches + 2
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_: Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE_: Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE_: int = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : List[str]):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str]):
SCREAMING_SNAKE_CASE_: Any = DeiTModel(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: int = model(lowerCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[str]):
SCREAMING_SNAKE_CASE_: List[Any] = DeiTForMaskedImageModeling(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: int = model(lowerCAmelCase__)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
SCREAMING_SNAKE_CASE_: Dict = 1
SCREAMING_SNAKE_CASE_: Tuple = DeiTForMaskedImageModeling(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_: List[str] = model(lowerCAmelCase__)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE_: Tuple = DeiTForImageClassification(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Optional[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
SCREAMING_SNAKE_CASE_: Optional[int] = 1
SCREAMING_SNAKE_CASE_: Union[str, Any] = DeiTForImageClassification(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_: Optional[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: str = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) ,
): List[str] = config_and_inputs
SCREAMING_SNAKE_CASE_: List[str] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
_UpperCAmelCase : List[str] = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_UpperCAmelCase : Any = (
{
'''feature-extraction''': DeiTModel,
'''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : Any = False
_UpperCAmelCase : Optional[int] = False
    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds")
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
pass
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Union[str, Any] = model_class(lowerCAmelCase__)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
SCREAMING_SNAKE_CASE_: Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear))
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Tuple = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: int = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict=False):
SCREAMING_SNAKE_CASE_: Dict = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__)
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: Optional[Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowerCAmelCase__)
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE_: Optional[int] = model_class(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.train()
SCREAMING_SNAKE_CASE_: Optional[Any] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = model(**lowerCAmelCase__).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_: List[str] = False
SCREAMING_SNAKE_CASE_: List[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCAmelCase__) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE_: Any = model_class(lowerCAmelCase__)
model.gradient_checkpointing_enable()
model.to(lowerCAmelCase__)
model.train()
SCREAMING_SNAKE_CASE_: List[str] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = model(**lowerCAmelCase__).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: Tuple = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowerCAmelCase__),
*get_values(lowerCAmelCase__),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}"):
SCREAMING_SNAKE_CASE_: Union[str, Any] = problem_type["title"]
SCREAMING_SNAKE_CASE_: Any = problem_type["num_labels"]
SCREAMING_SNAKE_CASE_: int = model_class(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.train()
SCREAMING_SNAKE_CASE_: int = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__)
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE_: int = inputs["labels"].unsqueeze(1).repeat(1 , problem_type["num_labels"])
SCREAMING_SNAKE_CASE_: Tuple = inputs["labels"].to(problem_type["dtype"])
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCAmelCase__) as warning_list:
SCREAMING_SNAKE_CASE_: Dict = model(**lowerCAmelCase__).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}")
loss.backward()
@slow
def _SCREAMING_SNAKE_CASE ( self : int):
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Dict = DeiTModel.from_pretrained(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int):
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: List[str] = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = self.default_image_processor
SCREAMING_SNAKE_CASE_: Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE_: Tuple = image_processor(images=lowerCAmelCase__ , return_tensors="pt").to(lowerCAmelCase__)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: List[str] = model(**lowerCAmelCase__)
# verify the logits
SCREAMING_SNAKE_CASE_: Any = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = torch.tensor([-1.0266, 0.1912, -1.2861]).to(lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4))
@slow
@require_accelerate
@require_torch_gpu
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: List[Any] = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto")
SCREAMING_SNAKE_CASE_: int = self.default_image_processor
SCREAMING_SNAKE_CASE_: Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE_: Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt")
SCREAMING_SNAKE_CASE_: Dict = inputs.pixel_values.to(lowerCAmelCase__)
# forward pass to make sure inference works in fp16
with torch.no_grad():
SCREAMING_SNAKE_CASE_: int = model(lowerCAmelCase__)
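
# --- Added usage sketch (hedged): distilled-DeiT image classification with
# the same checkpoint the tests above use (downloads weights on first run).
#   processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
#   model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
#   with torch.no_grad():
#       logits = model(**processor(images=prepare_img(), return_tensors="pt")).logits
#   print(model.config.id2label[int(logits.argmax(-1))])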
| 13
|
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    '''Return x unchanged if it is iterable, otherwise as the pair (x, x).'''
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
pass
def __UpperCAmelCase ( self ) -> Any:
pass
def __UpperCAmelCase ( self ) -> List[Any]:
pass
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
_a = np.abs((a - b) ).max()
self.assertLessEqual(__magic_name__ , __magic_name__ , f'Difference between torch and flax is {diff} (>= {tol}).' )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , **__magic_name__ ) -> Tuple:
_a = VisionTextDualEncoderConfig.from_vision_text_configs(__magic_name__ , __magic_name__ )
_a = FlaxVisionTextDualEncoderModel(__magic_name__ )
_a = model(input_ids=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , **__magic_name__ ) -> Optional[Any]:
_a , _a = self.get_vision_text_model(__magic_name__ , __magic_name__ )
_a = {'vision_model': vision_model, 'text_model': text_model}
_a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__magic_name__ )
_a = model(input_ids=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , **__magic_name__ ) -> Union[str, Any]:
_a , _a = self.get_vision_text_model(__magic_name__ , __magic_name__ )
_a = {'vision_model': vision_model, 'text_model': text_model}
_a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__magic_name__ )
_a = model(input_ids=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ )
_a = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__magic_name__ )
_a = FlaxVisionTextDualEncoderModel.from_pretrained(__magic_name__ )
_a = model(input_ids=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ )
_a = after_output[0]
_a = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__magic_name__ , 1e-3 )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , **__magic_name__ ) -> Any:
_a , _a = self.get_vision_text_model(__magic_name__ , __magic_name__ )
_a = {'vision_model': vision_model, 'text_model': text_model}
_a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__magic_name__ )
_a = model(
input_ids=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , output_attentions=__magic_name__ )
_a = output.vision_model_output.attentions
self.assertEqual(len(__magic_name__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_a = to_atuple(vision_model.config.image_size )
_a = to_atuple(vision_model.config.patch_size )
_a = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_a = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_a = output.text_model_output.attentions
self.assertEqual(len(__magic_name__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> int:
pt_model.to(__magic_name__ )
pt_model.eval()
# prepare inputs
_a = inputs_dict
_a = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
_a = pt_model(**__magic_name__ ).to_tuple()
_a = fx_model(**__magic_name__ ).to_tuple()
self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__magic_name__ , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__magic_name__ )
_a = FlaxVisionTextDualEncoderModel.from_pretrained(__magic_name__ , from_pt=__magic_name__ )
_a = fx_model_loaded(**__magic_name__ ).to_tuple()
self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__magic_name__ , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__magic_name__ )
_a = VisionTextDualEncoderModel.from_pretrained(__magic_name__ , from_flax=__magic_name__ )
pt_model_loaded.to(__magic_name__ )
pt_model_loaded.eval()
with torch.no_grad():
_a = pt_model_loaded(**__magic_name__ ).to_tuple()
self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(__magic_name__ , pt_output_loaded.numpy() , 4e-2 )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
_a = VisionTextDualEncoderConfig.from_vision_text_configs(__magic_name__ , __magic_name__ )
_a = VisionTextDualEncoderModel(__magic_name__ )
_a = FlaxVisionTextDualEncoderModel(__magic_name__ )
_a = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __magic_name__ )
_a = fx_state
self.check_pt_flax_equivalence(__magic_name__ , __magic_name__ , __magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
_a = VisionTextDualEncoderConfig.from_vision_text_configs(__magic_name__ , __magic_name__ )
_a = VisionTextDualEncoderModel(__magic_name__ )
_a = FlaxVisionTextDualEncoderModel(__magic_name__ )
_a = load_flax_weights_in_pytorch_model(__magic_name__ , fx_model.params )
self.check_pt_flax_equivalence(__magic_name__ , __magic_name__ , __magic_name__ )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__magic_name__ )
def __UpperCAmelCase ( self ) -> Dict:
_a = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__magic_name__ )
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a = self.prepare_config_and_inputs()
self.check_save_load(**__magic_name__ )
def __UpperCAmelCase ( self ) -> Dict:
_a = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__magic_name__ )
@is_pt_flax_cross_test
def __UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.prepare_config_and_inputs()
_a = config_inputs_dict.pop('vision_config' )
_a = config_inputs_dict.pop('text_config' )
_a = config_inputs_dict
self.check_equivalence_pt_to_flax(__magic_name__ , __magic_name__ , __magic_name__ )
self.check_equivalence_flax_to_pt(__magic_name__ , __magic_name__ , __magic_name__ )
@slow
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a , _a = self.get_pretrained_model_and_inputs()
_a = model_a(**__magic_name__ )
_a = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__magic_name__ )
_a = FlaxVisionTextDualEncoderModel.from_pretrained(__magic_name__ )
_a = model_a(**__magic_name__ )
_a = after_outputs[0]
_a = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__magic_name__ , 1e-5 )
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
def __UpperCAmelCase ( self ) -> List[str]:
_a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=__magic_name__ , text_from_pt=__magic_name__ , )
_a = 13
_a = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_a = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_a = random_attention_mask([batch_size, 4] )
_a = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> Optional[int]:
_a = FlaxViTModel(__magic_name__ )
_a = FlaxBertModel(__magic_name__ )
return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class a ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class a ( unittest.TestCase ):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
from __future__ import annotations


class BoyerMooreSearch:
    """Searches for a pattern in text using the Boyer-Moore bad character heuristic."""

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Finds the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Scans the pattern right to left at alignment `current_pos` and returns
        the index in the text of the first mismatch, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                # note: reassigning the loop variable does not change the `range` iteration
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
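# A quick sanity check of the heuristic above (a sketch using the same inputs
# as the demo): scanning right-to-left at each alignment, pattern "AB" matches
# text "ABAABA" at indices 0 and 3.
assert BoyerMooreSearch("ABAABA", "AB").bad_character_heuristic() == [0, 3]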
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
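# Usage sketch (not part of this module): with the `_LazyModule` indirection
# above, `import transformers` stays cheap and `modeling_timesformer` is only
# imported when one of its attributes is first accessed, e.g.:
#
#     from transformers import TimesformerConfig, TimesformerModel
#     model = TimesformerModel(TimesformerConfig())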
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class _lowercase ( unittest.TestCase ):
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Dict = cached_file(UpperCamelCase__ , UpperCamelCase__ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(UpperCamelCase__ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) )
with open(os.path.join(UpperCamelCase__ , """refs""" , """main""" ) ) as f:
lowerCamelCase__ : int = f.read()
self.assertEqual(UpperCamelCase__ , os.path.join(UpperCamelCase__ , """snapshots""" , UpperCamelCase__ , UpperCamelCase__ ) )
self.assertTrue(os.path.isfile(UpperCamelCase__ ) )
# File is cached at the same place the second time.
lowerCamelCase__ : Union[str, Any] = cached_file(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# Using a specific revision to test the full commit hash.
lowerCamelCase__ : str = cached_file(UpperCamelCase__ , UpperCamelCase__ , revision="""9b8c223""" )
self.assertEqual(UpperCamelCase__ , os.path.join(UpperCamelCase__ , """snapshots""" , UpperCamelCase__ , UpperCamelCase__ ) )
def lowerCamelCase_ ( self: List[Any] ):
with self.assertRaisesRegex(UpperCamelCase__ , """is not a valid model identifier""" ):
lowerCamelCase__ : Tuple = cached_file("""tiny-random-bert""" , UpperCamelCase__ )
with self.assertRaisesRegex(UpperCamelCase__ , """is not a valid git identifier""" ):
lowerCamelCase__ : List[str] = cached_file(UpperCamelCase__ , UpperCamelCase__ , revision="""aaaa""" )
with self.assertRaisesRegex(UpperCamelCase__ , """does not appear to have a file named""" ):
lowerCamelCase__ : str = cached_file(UpperCamelCase__ , """conf""" )
def lowerCamelCase_ ( self: Optional[int] ):
with self.assertRaisesRegex(UpperCamelCase__ , """does not appear to have a file named""" ):
lowerCamelCase__ : Any = cached_file(UpperCamelCase__ , """conf""" )
with open(os.path.join(UpperCamelCase__ , """refs""" , """main""" ) ) as f:
lowerCamelCase__ : Optional[int] = f.read()
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , """.no_exist""" , UpperCamelCase__ , """conf""" ) ) )
lowerCamelCase__ : Optional[Any] = cached_file(UpperCamelCase__ , """conf""" , _raise_exceptions_for_missing_entries=UpperCamelCase__ )
self.assertIsNone(UpperCamelCase__ )
lowerCamelCase__ : Dict = cached_file(UpperCamelCase__ , """conf""" , local_files_only=UpperCamelCase__ , _raise_exceptions_for_missing_entries=UpperCamelCase__ )
self.assertIsNone(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = mock.Mock()
lowerCamelCase__ : str = 500
lowerCamelCase__ : List[str] = {}
lowerCamelCase__ : Union[str, Any] = HTTPError
lowerCamelCase__ : List[str] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=UpperCamelCase__ ) as mock_head:
lowerCamelCase__ : List[Any] = cached_file(UpperCamelCase__ , """conf""" , _raise_exceptions_for_connection_errors=UpperCamelCase__ )
self.assertIsNone(UpperCamelCase__ )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCamelCase_ ( self: Dict ):
self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCamelCase__ ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCamelCase__ ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCamelCase__ ) )
def lowerCamelCase_ ( self: Optional[Any] ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt""" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(UpperCamelCase__ , """is not a valid model identifier""" ):
get_file_from_repo("""bert-base-case""" , UpperCamelCase__ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(UpperCamelCase__ , """is not a valid git identifier""" ):
get_file_from_repo("""bert-base-cased""" , UpperCamelCase__ , revision="""ahaha""" )
lowerCamelCase__ : Tuple = get_file_from_repo("""bert-base-cased""" , UpperCamelCase__ )
# The name is the cached name which is not very easy to test, so instead we load the content.
lowerCamelCase__ : str = json.loads(open(UpperCamelCase__ , """r""" ).read() )
self.assertEqual(config["""hidden_size"""] , 768 )
def lowerCamelCase_ ( self: List[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ : int = Path(UpperCamelCase__ ) / """a.txt"""
filename.touch()
self.assertEqual(get_file_from_repo(UpperCamelCase__ , """a.txt""" ) , str(UpperCamelCase__ ) )
self.assertIsNone(get_file_from_repo(UpperCamelCase__ , """b.txt""" ) )
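# Usage sketch for the helpers exercised above: `cached_file` resolves a repo
# file to a local cached path (raising on missing files unless the
# `_raise_exceptions_for_*` flags are disabled), while `get_file_from_repo`
# returns None for missing files instead of raising:
#
#     config_path = cached_file("hf-internal-testing/tiny-random-bert", CONFIG_NAME)
#     maybe_path = get_file_from_repo("bert-base-cased", "ahah.txt")  # -> None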
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
_lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples (story, 1st continuation, 2nd continuation, label).

    Produces Transformer inputs of shape (n_batch, 2, input_len) where each candidate is encoded as
    [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_snake_case )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
__a =torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
__a =torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(_snake_case , _snake_case ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__a =['_start_', '_delimiter_', '_classify_']
__a =OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_snake_case )
__a =tokenizer.convert_tokens_to_ids(_snake_case )
__a =OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_snake_case ) )
model.to(_snake_case )
# Load and encode the datasets
def tokenize_and_encode(_snake_case : int ):
if isinstance(_snake_case , _snake_case ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_snake_case ) )
elif isinstance(_snake_case , _snake_case ):
return obj
return [tokenize_and_encode(_snake_case ) for o in obj]
logger.info('Encoding dataset...' )
__a =load_rocstories_dataset(args.train_dataset )
__a =load_rocstories_dataset(args.eval_dataset )
__a =(train_dataset, eval_dataset)
__a =tokenize_and_encode(_snake_case )
# Compute the max input length for the Transformer
__a =model.config.n_positions // 2 - 2
__a =max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
__a =min(_snake_case , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__a =pre_process_datasets(_snake_case , _snake_case , _snake_case , *_snake_case )
__a , __a =tensor_datasets[0], tensor_datasets[1]
__a =TensorDataset(*_snake_case )
__a =RandomSampler(_snake_case )
__a =DataLoader(_snake_case , sampler=_snake_case , batch_size=args.train_batch_size )
__a =TensorDataset(*_snake_case )
__a =SequentialSampler(_snake_case )
__a =DataLoader(_snake_case , sampler=_snake_case , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__a =args.max_steps
__a =args.max_steps // (len(_snake_case ) // args.gradient_accumulation_steps) + 1
else:
__a =len(_snake_case ) // args.gradient_accumulation_steps * args.num_train_epochs
__a =list(model.named_parameters() )
__a =['bias', 'LayerNorm.bias', 'LayerNorm.weight']
__a =[
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
__a =AdamW(_snake_case , lr=args.learning_rate , eps=args.adam_epsilon )
__a =get_linear_schedule_with_warmup(
_snake_case , num_warmup_steps=args.warmup_steps , num_training_steps=_snake_case )
if args.do_train:
__a , __a , __a =0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
__a =0
__a =0
__a =tqdm(_snake_case , desc='Training' )
for step, batch in enumerate(_snake_case ):
__a =tuple(t.to(_snake_case ) for t in batch )
__a , __a , __a , __a =batch
__a =model(_snake_case , mc_token_ids=_snake_case , lm_labels=_snake_case , mc_labels=_snake_case )
__a =args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__a =(
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__a ='Training loss: {:.2e} lr: {:.2e}'.format(_snake_case , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__a =model.module if hasattr(_snake_case , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__a =os.path.join(args.output_dir , _snake_case )
__a =os.path.join(args.output_dir , _snake_case )
torch.save(model_to_save.state_dict() , _snake_case )
model_to_save.config.to_json_file(_snake_case )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
__a =OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
__a =OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_snake_case )
if args.do_eval:
model.eval()
__a , __a =0, 0
__a , __a =0, 0
for batch in tqdm(_snake_case , desc='Evaluating' ):
__a =tuple(t.to(_snake_case ) for t in batch )
__a , __a , __a , __a =batch
with torch.no_grad():
__a , __a , __a , __a =model(
_snake_case , mc_token_ids=_snake_case , lm_labels=_snake_case , mc_labels=_snake_case )
__a =mc_logits.detach().cpu().numpy()
__a =mc_labels.to('cpu' ).numpy()
__a =accuracy(_snake_case , _snake_case )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
__a =eval_loss / nb_eval_steps
__a =eval_accuracy / nb_eval_examples
__a =tr_loss / nb_tr_steps if args.do_train else None
__a ={'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
__a =os.path.join(args.output_dir , 'eval_results.txt' )
with open(_snake_case , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , _snake_case , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
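# Example invocation (script name and dataset paths are placeholders; the CSVs
# are the ROCStories cloze-test files expected by `load_rocstories_dataset`):
#
#   python run_openai_gpt.py \
#       --model_name openai-gpt \
#       --do_train --do_eval \
#       --train_dataset "$ROC_STORIES_DIR/cloze_test_val__spring2016.csv" \
#       --eval_dataset "$ROC_STORIES_DIR/cloze_test_test__spring2016.csv" \
#       --output_dir ../log \
#       --train_batch_size 16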
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
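# Example invocations (the flags are parsed in `main` above; the file name is
# a placeholder):
#
#   python nlp_example.py                                     # fp32, single device
#   accelerate launch nlp_example.py --mixed_precision fp16   # distributed / mixed precision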
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
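# Usage sketch for the class above: prefix sums turn range-sum queries into
# O(1) lookups after an O(n) build, and `contains_sum` checks whether any
# contiguous subarray adds up to the target.
#
#     sums = PrefixSum([1, 2, 3, 4])
#     sums.get_sum(0, 2)      # 1 + 2 + 3 == 6
#     sums.contains_sum(7)    # True: the subarray [3, 4] sums to 7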
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
def A__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='MobileViTV2 does not output attentions' )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
def A__ ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def A__ ( self ) -> int:
'''simple docstring'''
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def A__ ( self ) -> Tuple:
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
if is_vision_available()
else None
)
@slow
def A__ ( self ) -> List[str]:
'''simple docstring'''
_lowercase =MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
lowerCAmelCase )
_lowercase =self.default_image_processor
_lowercase =prepare_img()
_lowercase =image_processor(images=lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowercase =model(**lowerCAmelCase )
# verify the logits
_lowercase =torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
_lowercase =torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1e-4 ) )
@slow
def A__ ( self ) -> str:
'''simple docstring'''
_lowercase =MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
_lowercase =model.to(lowerCAmelCase )
_lowercase =MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
_lowercase =prepare_img()
_lowercase =image_processor(images=lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowercase =model(**lowerCAmelCase )
_lowercase =outputs.logits
# verify the logits
_lowercase =torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , lowerCAmelCase )
_lowercase =torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=lowerCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCAmelCase , atol=1e-4 ) )
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
_lowercase =MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
_lowercase =model.to(lowerCAmelCase )
_lowercase =MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
_lowercase =prepare_img()
_lowercase =image_processor(images=lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowercase =model(**lowerCAmelCase )
_lowercase =outputs.logits.detach().cpu()
_lowercase =image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase , target_sizes=[(50, 60)] )
_lowercase =torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , lowerCAmelCase )
_lowercase =image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase )
_lowercase =torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , lowerCAmelCase )
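# Inference sketch mirroring the integration tests above (checkpoint names as
# used in the tests):
#
#     processor = MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
#     model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
#     inputs = processor(images=prepare_img(), return_tensors="pt")
#     logits = model(**inputs).logits  # shape (1, 1000)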
import os


def solution():
    """Returns the total of all the name scores in p022_names.txt."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0

    return total_score


if __name__ == "__main__":
    print(solution())
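# Worked example from the Project Euler problem statement: "COLIN" has an
# alphabetical value of 3 + 15 + 12 + 9 + 14 = 53, and as the 938th name in
# sorted order it contributes 938 * 53 = 49714 to the total.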
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowercase__ = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class __snake_case ( __UpperCamelCase ):
a__ = """albert"""
def __init__( self , lowercase=3_00_00 , lowercase=1_28 , lowercase=40_96 , lowercase=12 , lowercase=1 , lowercase=64 , lowercase=1_63_84 , lowercase=1 , lowercase="gelu_new" , lowercase=0 , lowercase=0 , lowercase=5_12 , lowercase=2 , lowercase=0.02 , lowercase=1e-12 , lowercase=0.1 , lowercase="absolute" , lowercase=0 , lowercase=2 , lowercase=3 , **lowercase , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase)
a__: Tuple = vocab_size
a__: Optional[int] = embedding_size
a__: str = hidden_size
a__: Any = num_hidden_layers
a__: List[str] = num_hidden_groups
a__: Tuple = num_attention_heads
a__: str = inner_group_num
a__: int = hidden_act
a__: List[str] = intermediate_size
a__: Optional[int] = hidden_dropout_prob
a__: Dict = attention_probs_dropout_prob
a__: Optional[Any] = max_position_embeddings
a__: List[Any] = type_vocab_size
a__: List[str] = initializer_range
a__: List[Any] = layer_norm_eps
a__: Any = classifier_dropout_prob
a__: Any = position_embedding_type
class __snake_case ( __UpperCamelCase ):
@property
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
if self.task == "multiple-choice":
a__: int = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
a__: List[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
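# Usage sketch (illustrative sizes only): the defaults above correspond to an
# xxlarge-style ALBERT, so smaller variants override the relevant dimensions:
#
#     config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)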
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"
@property
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Union[str, Any] = 4
a__: Any = 8
a__: Optional[Any] = 7
a__: Tuple = floats_tensor((batch_size, embedding_dim)).to(lowercase)
a__: Optional[int] = floats_tensor((batch_size, embedding_dim)).to(lowercase)
a__: List[str] = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(lowercase)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowerCamelCase_ ( self , lowercase=0) -> str:
'''simple docstring'''
torch.manual_seed(lowercase)
a__: Optional[Any] = 4
a__: Optional[Any] = 8
a__: Union[str, Any] = 7
a__: Optional[Any] = torch.randn((batch_size, embedding_dim)).to(lowercase)
a__: List[str] = torch.randn((batch_size, embedding_dim)).to(lowercase)
a__: Tuple = torch.randn((batch_size, num_embeddings, embedding_dim)).to(lowercase)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
return (4, 8)
@property
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
return (4, 8)
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: int = {
'num_attention_heads': 2,
'attention_head_dim': 4,
'num_layers': 2,
'embedding_dim': 8,
'num_embeddings': 7,
'additional_embeddings': 4,
}
a__: Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__ , a__: Union[str, Any] = PriorTransformer.from_pretrained(
'hf-internal-testing/prior-dummy' , output_loading_info=lowercase)
self.assertIsNotNone(lowercase)
self.assertEqual(len(loading_info['missing_keys']) , 0)
model.to(lowercase)
a__: Any = model(**self.dummy_input)[0]
assert hidden_states is not None, "Make sure output is not None"
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__ , a__: Tuple = self.prepare_init_args_and_inputs_for_common()
a__: Any = self.model_class(**lowercase)
a__: str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__: Tuple = [*signature.parameters.keys()]
a__: List[Any] = ['hidden_states', 'timestep']
self.assertListEqual(arg_names[:2] , lowercase)
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: str = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy')
a__: str = model.to(lowercase)
if hasattr(lowercase , 'set_default_attn_processor'):
model.set_default_attn_processor()
a__: Dict = self.get_dummy_seed_input()
with torch.no_grad():
a__: str = model(**lowercase)[0]
a__: str = output[0, :5].flatten().cpu()
print(lowercase)
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
a__: Any = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
self.assertTrue(torch_all_close(lowercase , lowercase , rtol=1e-2))
@slow
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self , lowercase=1 , lowercase=7_68 , lowercase=77 , lowercase=0) -> int:
'''simple docstring'''
torch.manual_seed(lowercase)
a__: Union[str, Any] = batch_size
a__: List[str] = embedding_dim
a__: str = num_embeddings
a__: Tuple = torch.randn((batch_size, embedding_dim)).to(lowercase)
a__: List[str] = torch.randn((batch_size, embedding_dim)).to(lowercase)
a__: str = torch.randn((batch_size, num_embeddings, embedding_dim)).to(lowercase)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
])
def lowerCamelCase_ ( self , lowercase , lowercase) -> str:
'''simple docstring'''
a__: Tuple = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior')
model.to(lowercase)
a__: Optional[Any] = self.get_dummy_seed_input(seed=lowercase)
with torch.no_grad():
a__: Optional[int] = model(**lowercase)[0]
assert list(sample.shape) == [1, 7_68]
a__: List[str] = sample[0, :8].flatten().cpu()
print(lowercase)
a__: Union[str, Any] = torch.tensor(lowercase)
assert torch_all_close(lowercase , lowercase , atol=1e-3)
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def rename_keys(s_dict):
    """Rename T5X checkpoint keys to the transformers SwitchTransformers layout."""
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"

        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)

            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")

            s_dict.pop(key)

    return s_dict
GIN_TO_CONFIG_MAPPING = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def convert_gin_to_config(gin_file, num_experts):
    """Convert a Google-style gin config to a SwitchTransformersConfig."""
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    """Load a T5X SwitchTransformers checkpoint and save it as a PyTorch model."""
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
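# Example invocation (file name and paths are placeholders):
#
#   python convert_switch_transformers_flax_to_pytorch.py \
#       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --gin_file /path/to/operative_config.gin \
#       --pytorch_dump_folder_path ./switch-base-8 \
#       --num_experts 8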
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline(Pipeline):
    """simple docstring"""
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
def __init__( self , **__UpperCAmelCase ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        '''simple docstring'''
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, 'return_all_scores') and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k, int) or top_k is None:
            postprocess_params['top_k'] = top_k
            postprocess_params['_legacy'] = False
        elif return_all_scores is not None:
            warnings.warn(
                '`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'
                ' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.', UserWarning, )
            if return_all_scores:
                postprocess_params['top_k'] = None
            else:
                postprocess_params['top_k'] = 1
        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params['function_to_apply'] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        '''simple docstring'''
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = 'top_k' not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs):
        '''simple docstring'''
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.')
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        '''simple docstring'''
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        '''simple docstring'''
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, 'function_to_apply') and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs['logits'][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(F"Unrecognized `function_to_apply` argument: {function_to_apply}")
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {'label': self.model.config.id2label[i], 'score': score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
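# Usage sketch for the pipeline above (hedged: downloads the default sentiment
# checkpoint; exact scores will differ):
#
#     >>> from transformers import pipeline
#     >>> classifier = pipeline("text-classification")
#     >>> classifier("This movie was great!")
#     [{'label': 'POSITIVE', 'score': 0.999...}]
#     >>> classifier("This movie was great!", top_k=None)  # scores for every label
#     [{'label': 'POSITIVE', 'score': 0.999...}, {'label': 'NEGATIVE', 'score': 0.000...}]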
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False) -> int:
    '''simple docstring'''
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"""If set, {key} must be yes or no.""")
    return _value
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
_run_remote_tests = parse_flag_from_env('''RUN_REMOTE''', default=False)
_run_local_tests = parse_flag_from_env('''RUN_LOCAL''', default=True)
_run_packaged_tests = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
require_sndfile = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
require_beam = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
require_not_windows = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def require_faiss(test_case):
    '''simple docstring'''
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires faiss""")(test_case)
    return test_case
def require_regex(test_case):
    '''simple docstring'''
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires regex""")(test_case)
    return test_case
def require_elasticsearch(test_case):
    '''simple docstring'''
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires elasticsearch""")(test_case)
    return test_case
def require_sqlalchemy(test_case):
    '''simple docstring'''
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires sqlalchemy""")(test_case)
    return test_case
def require_torch(test_case):
    '''simple docstring'''
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("""test requires PyTorch""")(test_case)
    return test_case
def require_tf(test_case):
    '''simple docstring'''
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("""test requires TensorFlow""")(test_case)
    return test_case
def require_jax(test_case):
    '''simple docstring'''
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("""test requires JAX""")(test_case)
    return test_case
def require_pil(test_case):
    '''simple docstring'''
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("""test requires Pillow""")(test_case)
    return test_case
def require_transformers(test_case):
    '''simple docstring'''
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("""test requires transformers""")(test_case)
    else:
        return test_case
def require_tiktoken(test_case):
    '''simple docstring'''
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("""test requires tiktoken""")(test_case)
    else:
        return test_case
def require_spacy(test_case):
    '''simple docstring'''
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("""test requires spacy""")(test_case)
    else:
        return test_case
def require_spacy_model(model):
    '''simple docstring'''
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401
            spacy.load(model)
        except ImportError:
            return unittest.skip("""test requires spacy""")(test_case)
        except OSError:
            return unittest.skip("""test requires spacy model '{}'""".format(model))(test_case)
        else:
            return test_case
    return _require_spacy_model
def require_pyspark(test_case):
    '''simple docstring'''
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("""test requires pyspark""")(test_case)
    else:
        return test_case
def require_joblibspark(test_case):
    '''simple docstring'''
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("""test requires joblibspark""")(test_case)
    else:
        return test_case
def slow(test_case):
    '''simple docstring'''
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("""test is slow""")(test_case)
    return test_case
def local(test_case):
    '''simple docstring'''
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("""test is local""")(test_case)
    return test_case
def packaged(test_case):
    '''simple docstring'''
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("""test is packaged""")(test_case)
    return test_case
def remote(test_case):
    '''simple docstring'''
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("""test requires remote""")(test_case)
    return test_case
def for_all_test_methods(*decorators):
    '''simple docstring'''
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("""test"""):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls
    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    """simple docstring"""
    pass
class OfflineSimulationMode(Enum):
    """simple docstring"""
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1E-16):
    '''simple docstring'''
    online_request = requests.Session().request
    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = """https://10.255.255.1"""
        if kwargs.get("""timeout""") is None:
            raise RequestWouldHangIndefinitelyError(
                F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""")
        kwargs["""timeout"""] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("""10.255.255.1""", F"""OfflineMock[{url}]"""),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("""Offline mode is enabled.""", request=prepared_request)
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("""requests.Session.send""", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("""requests.Session.request""", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("""datasets.config.HF_DATASETS_OFFLINE""", True):
            yield
    else:
        raise ValueError("""Please use a value from the OfflineSimulationMode enum.""")
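# Usage sketch for `offline` (names as defined above; run inside a test):
#
#     with offline(OfflineSimulationMode.CONNECTION_FAILS):
#         requests.get("https://huggingface.co")  # raises requests.ConnectionError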
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    '''simple docstring'''
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    '''simple docstring'''
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase():
    '''simple docstring'''
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2) -> bool:
    '''simple docstring'''
    return deepcopy(rng1).integers(0, 1_00, 10).tolist() == deepcopy(rng2).integers(0, 1_00, 10).tolist()
def xfail_if_500_502_http_error(func):
    '''simple docstring'''
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("""500""") or str(err).startswith("""502"""):
                pytest.xfail(str(err))
            raise err
    return decorator.decorator(_wrapper, func)
class _RunOutput:
    """simple docstring"""
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback) -> None:
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    '''simple docstring'''
    if echo:
        print("""\nRunning: """, """ """.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,)
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line, sink, pipe, label=""):
        line = line.decode("""utf-8""").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="""stdout:""")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="""stderr:""")),
        ], timeout=timeout,)
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=1_80, quiet=False, echo=True) -> _RunOutput:
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = """ """.join(cmd)
    if result.returncode > 0:
        stderr = """\n""".join(result.stderr)
        raise RuntimeError(
            F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}""")
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F"""'{cmd_str}' produced no output.""")
    return result
def pytest_xdist_worker_id() -> int:
    '''simple docstring'''
    worker = os.environ.get("""PYTEST_XDIST_WORKER""", """gw0""")
    worker = re.sub(r"""^gw""", """""", worker, 0, re.M)
    return int(worker)
def get_torch_dist_unique_port() -> int:
    '''simple docstring'''
    port = 2_95_00
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
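# Example: under pytest-xdist worker "gw3", pytest_xdist_worker_id() returns 3, so
# get_torch_dist_unique_port() returns 29500 + 3 = 29503 and parallel test workers
# get distinct torch.distributed ports.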
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("""glue""", """mrpc""")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["""idx""", """sentence1""", """sentence2"""], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="""max_length""", max_length=1_28, return_tensors="""pt""")
        return tokenizer.pad(examples, padding="""longest""", return_tensors="""pt""")
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""])
    seed = int(config["""seed"""])
    batch_size = int(config["""batch_size"""])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            """gradient_accumulation_steps"""
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load("""glue""", """mrpc""")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["""labels"""]))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"""epoch {epoch}:""", eval_metric)
        performance_metric[F"""epoch-{epoch}"""] = eval_metric["""accuracy"""]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["""accuracy"""]
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), F"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, """all_results.json"""), """w""") as f:
            json.dump(performance_metric, f)
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""")
    parser.add_argument(
        """--model_name_or_path""", type=str, default="""bert-base-cased""", help="""Path to pretrained model or model identifier from huggingface.co/models.""", required=False,)
    parser.add_argument(
        """--output_dir""", type=str, default=""".""", help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""",)
    parser.add_argument(
        """--performance_lower_bound""", type=float, default=None, help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""",)
    parser.add_argument(
        """--num_epochs""", type=int, default=3, help="""Number of train epochs.""",)
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
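# Typical launch for this script (hedged: the config file name is a placeholder;
# `accelerate launch` is the standard entry point shipped with the library):
#
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --output_dir ./results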
"""simple docstring"""
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding='utf-8') as f:
        results = json.load(f)
    output_md = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split('/')[-1]
        output_md.append(F'''### Benchmark: {benchmark_file_name}''')
        title = '| metric |'
        lines = '|--------|'
        value = '| new / old (diff) |'
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals['new']
            old_val = metric_vals.get('old', None)
            dif_val = metric_vals.get('diff', None)
            val_str = F''' {new_val:f}''' if isinstance(new_val, (int, float)) else 'None'
            if old_val is not None:
                val_str += F''' / {old_val:f}''' if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += F''' ({dif_val:f})''' if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append('</details>')
    with open(output_md_file, 'w', encoding='utf-8') as f:
        f.writelines('\n'.join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
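# Example of the JSON shape this script consumes, inferred from the loops above
# (file and metric names are made up):
#
# {
#   "benchmarks/inference.json": {
#     "latency_ms": {"new": 12.3, "old": 14.1, "diff": -1.8}
#   }
# }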
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']
    def __init__(self, do_resize=True, size=None, crop_pct=0.9, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, rescale_factor=1 / 255, do_rescale=True, do_normalize=True, image_mean=None, image_std=None, **kwargs,) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image, size, crop_pct=None, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs,) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f'''size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}''')
        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size['shortest_edge'] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size['height'] / crop_pct)
                else:
                    scale_size = (int(size['height'] / crop_pct), int(size['width'] / crop_pct))
            else:
                raise ValueError('Invalid size for resize: {}'.format(size))
            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size['height'], size['width'])
            else:
                raise ValueError('Invalid size for resize: {}'.format(size))
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs,) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''size must contain 'height' and 'width' as keys. Got {size.keys()}''')
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs,) -> np.ndarray:
        '''simple docstring'''
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image, mean, std, data_format=None, **kwargs,) -> np.ndarray:
        '''simple docstring'''
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, size=None, crop_pct=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs,) -> PIL.Image.Image:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_center_crop and crop_pct is None:
            raise ValueError('Crop_pct must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
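# Usage sketch for the processor above (hedged: `pil_image` stands for any PIL image):
#
#     >>> processor = PoolFormerImageProcessor()
#     >>> batch = processor(images=pil_image, return_tensors="pt")
#     >>> batch["pixel_values"].shape
#     torch.Size([1, 3, 224, 224])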
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(self, batch_size=1, generator=None, eta=0.0, num_inference_steps=50, use_clipped_model_output=None, output_type="pil", return_dict=True, ):
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                F'You have passed a list of generators of length {len(generator)}, but requested an effective batch'
                F' size of {batch_size}. Make sure the batch size matches the length of the generators.')
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
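# Usage sketch (hedged: downloads a pretrained checkpoint; the model id below is a
# standard example from the diffusers docs):
#
#     >>> from diffusers import DDIMPipeline
#     >>> pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     >>> image = pipe(num_inference_steps=50, eta=0.0).images[0]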
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser('Accelerate CLI tool', usage='accelerate <command> [<args>]', allow_abbrev=False)
    subparsers = parser.add_subparsers(help='accelerate command helpers')
    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
main()
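# Example invocations of the resulting CLI (standard accelerate subcommands):
#
#   accelerate config    # interactive configuration
#   accelerate env       # print environment info
#   accelerate launch train.py --num_epochs 3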
'''simple docstring'''
def sum_of_digits(n: int) -> int:
    """simple docstring"""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int) -> int:
    """simple docstring"""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)
def sum_of_digits_compact(n: int) -> int:
    """simple docstring"""
    return sum(int(c) for c in str(abs(n)))
def benchmark() -> None:
    """simple docstring"""
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable, value: int) -> None:
        call = F'''{func.__name__}({value})'''
        timing = timeit(F'''__main__.{call}''', setup='import __main__')
        print(F'''{call:56} = {func(value)} -- {timing:.4f} seconds''')
    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
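# Quick sanity check for the three implementations above:
# 262144 -> 2 + 6 + 2 + 1 + 4 + 4 = 19, and all three functions agree:
#
#     >>> sum_of_digits(262144) == sum_of_digits_recursion(262144) == sum_of_digits_compact(262144) == 19
#     True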
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False
    num_copy: int = num
    rev_num: int = 0
    while num > 0:
        rev_num = rev_num * 1_0 + (num % 1_0)
        num //= 1_0
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
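# Worked example: 121 reversed digit-by-digit is 121, so is_palindrome(121) is True;
# 123 reverses to 321, so is_palindrome(123) is False. Negative numbers return False.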
from math import sqrt
def solution(limit: int = 1_0_0_0_0_0_0) -> int:
    '''simple docstring'''
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F'''{solution() = }''')
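# The counting trick: for a cuboid a x b x c with a <= b <= c = max_cuboid_size, the
# shortest surface path has length sqrt((a + b)^2 + c^2), so each integer-length case
# is found by scanning a + b (sum_shortest_sides) and counting the valid (a, b) splits.
# For limit = 1_000_000 this is Project Euler problem 86, whose published answer is 1818.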
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    '''simple docstring'''
    model, model_cfg = create_model(
        """HTSAT-tiny""", """roberta""", checkpoint_path, precision="""fp32""", device="""cuda:0""" if torch.cuda.is_available() else """cpu""", enable_fusion=enable_fusion, fusion_type="""aff_2d""" if enable_fusion else None, )
    return model, model_cfg
def rename_state_dict(state_dict):
    '''simple docstring'''
    model_state_dict = {}
    sequential_layers_pattern = R""".*sequential.(\d+).*"""
    text_projection_pattern = R""".*_projection.(\d+).*"""
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(F'''sequential.{sequential_layer}.''', F'''layers.{int(sequential_layer)//3}.linear.''')
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(F'''_projection.{projecton_layer}.''', F'''_projection.linear{transformers_projection_layer}.''')
        if "audio" and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("""qkv""", """query""")] = query_layer
            model_state_dict[key.replace("""qkv""", """key""")] = key_layer
            model_state_dict[key.replace("""qkv""", """value""")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    '''simple docstring'''
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
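# Minimal sketch of the qkv split performed in `rename_state_dict` (toy tensor):
#
#     >>> import torch
#     >>> mixed_qkv = torch.arange(12.0)  # pretend the first dim is 3 * qkv_dim
#     >>> qkv_dim = mixed_qkv.size(0) // 3
#     >>> query, key, value = mixed_qkv[:qkv_dim], mixed_qkv[qkv_dim:2 * qkv_dim], mixed_qkv[2 * qkv_dim:]
#     >>> query.tolist()
#     [0.0, 1.0, 2.0, 3.0]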
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = 'vit_msn'
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
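# Usage sketch: instantiate with defaults and override a single field.
#
#     >>> config = ViTMSNConfig(image_size=384)
#     >>> config.num_hidden_layers
#     12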
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    """simple docstring"""
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args):
    """simple docstring"""
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)")
    parser.add_argument(
        "--data_file", type=str, required=True, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.", )
    parser.add_argument(
        "--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True, help="The student type (DistilBERT, RoBERTa).", )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint.")
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa).")
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0.")
    parser.add_argument(
        "--alpha_mlm", default=0.0, type=float, help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.", )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0.")
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.")
    parser.add_argument(
        "--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction.", )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing", default=0.7, type=float, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).", )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask", action="store_true", help="If true, compute the distillation loss only the [MLM] prediction distribution.", )
    parser.add_argument(
        "--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.", )
    parser.add_argument(
        "--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.", )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size", action="store_false", help="If true, group sequences that have similar length into the same batch. Default is true.", )
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches.", )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", )
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O1", help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ), )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)
    # ARGS #
    init_gpu_params(args)
    set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    F'Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'
                    " it. Use `--force` if you want to overwrite it.")
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'Experiment will be dumped and logged in {args.dump_path}' )
# SAVE PARAMS #
logger.info(F'Param: {args}' )
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(F'Special tokens {special_tok_ids}')
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'Loading data from {args.data_file}' )
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)
    if args.mlm:
        logger.info(F'Loading token counts from {args.token_counts} (already pre-computed)')
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
logger.info("Data loader created." )
# STUDENT #
logger.info(F'Loading student config from {args.student_config}' )
snake_case : str = student_config_class.from_pretrained(args.student_config )
snake_case : Optional[int] = True
if args.student_pretrained_weights is not None:
logger.info(F'Loading pretrained weights from {args.student_pretrained_weights}' )
snake_case : Optional[int] = student_model_class.from_pretrained(args.student_pretrained_weights , config=lowercase )
else:
snake_case : Union[str, Any] = student_model_class(lowercase )
if args.n_gpu > 0:
student.to(F'cuda:{args.local_rank}' )
logger.info("Student loaded." )
# TEACHER #
snake_case : List[Any] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=lowercase )
if args.n_gpu > 0:
teacher.to(F'cuda:{args.local_rank}' )
logger.info(F'Teacher loaded from {args.teacher_name}.' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(lowercase , lowercase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(lowercase , lowercase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
snake_case : Optional[Any] = Distiller(
params=lowercase , dataset=lowercase , token_probs=lowercase , student=lowercase , teacher=lowercase )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
| 203
| 0
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A :
def __init__( self, UpperCamelCase__, UpperCamelCase__=2, UpperCamelCase__=3, UpperCamelCase__=4, UpperCamelCase__=2, UpperCamelCase__=7, UpperCamelCase__=True, UpperCamelCase__=True, UpperCamelCase__=True, UpperCamelCase__=True, UpperCamelCase__=99, UpperCamelCase__=36, UpperCamelCase__=3, UpperCamelCase__=4, UpperCamelCase__=37, UpperCamelCase__="gelu", UpperCamelCase__=0.1, UpperCamelCase__=0.1, UpperCamelCase__=512, UpperCamelCase__=16, UpperCamelCase__=2, UpperCamelCase__=0.02, UpperCamelCase__=6, UpperCamelCase__=6, UpperCamelCase__=3, UpperCamelCase__=4, UpperCamelCase__=None, UpperCamelCase__=1000, ):
"""simple docstring"""
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = image_size
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = text_seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_input_mask
lowerCAmelCase_ = use_token_type_ids
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = type_vocab_size
lowerCAmelCase_ = type_sequence_label_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = coordinate_size
lowerCAmelCase_ = shape_size
lowerCAmelCase_ = num_labels
lowerCAmelCase_ = num_choices
lowerCAmelCase_ = scope
lowerCAmelCase_ = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowerCAmelCase_ = text_seq_length
lowerCAmelCase_ = (image_size // patch_size) ** 2 + 1
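        # e.g. image_size=224, patch_size=16 -> (224 // 16) ** 2 + 1 = 197 visual tokens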
lowerCAmelCase_ = self.text_seq_length + self.image_seq_length
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size )
lowerCAmelCase_ = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox )
# Ensure that bbox is legal
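        # (boxes are (x0, y0, x1, y1); swap coordinates where needed so that x0 <= x1 and y0 <= y1)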
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCAmelCase_ = bbox[i, j, 3]
lowerCAmelCase_ = bbox[i, j, 1]
lowerCAmelCase_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCAmelCase_ = bbox[i, j, 2]
lowerCAmelCase_ = bbox[i, j, 0]
lowerCAmelCase_ = t
lowerCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ = None
if self.use_input_mask:
lowerCAmelCase_ = random_attention_mask([self.batch_size, self.text_seq_length] )
lowerCAmelCase_ = None
if self.use_token_type_ids:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size )
lowerCAmelCase_ = None
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCAmelCase_ = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels )
lowerCAmelCase_ = LayoutLMvaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = LayoutLMvaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# text + image
lowerCAmelCase_ = model(UpperCamelCase__, pixel_values=UpperCamelCase__ )
lowerCAmelCase_ = model(
UpperCamelCase__, bbox=UpperCamelCase__, pixel_values=UpperCamelCase__, attention_mask=UpperCamelCase__, token_type_ids=UpperCamelCase__ )
lowerCAmelCase_ = model(UpperCamelCase__, bbox=UpperCamelCase__, pixel_values=UpperCamelCase__, token_type_ids=UpperCamelCase__ )
lowerCAmelCase_ = model(UpperCamelCase__, bbox=UpperCamelCase__, pixel_values=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowerCAmelCase_ = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowerCAmelCase_ = model(pixel_values=UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = LayoutLMvaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCAmelCase_ = model(
UpperCamelCase__, bbox=UpperCamelCase__, pixel_values=UpperCamelCase__, attention_mask=UpperCamelCase__, token_type_ids=UpperCamelCase__, labels=UpperCamelCase__, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = LayoutLMvaForTokenClassification(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCAmelCase_ = model(
UpperCamelCase__, bbox=UpperCamelCase__, pixel_values=UpperCamelCase__, attention_mask=UpperCamelCase__, token_type_ids=UpperCamelCase__, labels=UpperCamelCase__, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = LayoutLMvaForQuestionAnswering(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCAmelCase_ = model(
UpperCamelCase__, bbox=UpperCamelCase__, pixel_values=UpperCamelCase__, attention_mask=UpperCamelCase__, token_type_ids=UpperCamelCase__, start_positions=UpperCamelCase__, end_positions=UpperCamelCase__, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.prepare_config_and_inputs()
        (
            lowerCAmelCase_ ,
            lowerCAmelCase_ ,
            lowerCAmelCase_ ,
            lowerCAmelCase_ ,
            lowerCAmelCase_ ,
            lowerCAmelCase_ ,
            lowerCAmelCase_ ,
            lowerCAmelCase_ ,
        ) = config_and_inputs
lowerCAmelCase_ = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class A ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__snake_case = (
{'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
return True
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = LayoutLMvaModelTester(self )
lowerCAmelCase_ = ConfigTester(self, config_class=UpperCamelCase__, hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__=False ):
"""simple docstring"""
lowerCAmelCase_ = copy.deepcopy(UpperCamelCase__ )
if model_class in get_values(UpperCamelCase__ ):
lowerCAmelCase_ = {
k: v.unsqueeze(1 ).expand(-1, self.model_tester.num_choices, -1 ).contiguous()
if isinstance(UpperCamelCase__, torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCamelCase__ ):
lowerCAmelCase_ = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=UpperCamelCase__ )
elif model_class in get_values(UpperCamelCase__ ):
lowerCAmelCase_ = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=UpperCamelCase__ )
lowerCAmelCase_ = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=UpperCamelCase__ )
elif model_class in [
*get_values(UpperCamelCase__ ),
]:
lowerCAmelCase_ = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=UpperCamelCase__ )
elif model_class in [
*get_values(UpperCamelCase__ ),
]:
lowerCAmelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=UpperCamelCase__, )
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase_ = type
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = LayoutLMvaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def __UpperCamelCase ( ):
lowerCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class A ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__ ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(UpperCamelCase__ )
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(images=UpperCamelCase__, return_tensors='''pt''' ).pixel_values.to(UpperCamelCase__ )
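        # toy text inputs: two token ids, each paired with an (x0, y0, x1, y1) bounding box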
lowerCAmelCase_ = torch.tensor([[1, 2]] )
lowerCAmelCase_ = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
lowerCAmelCase_ = model(
input_ids=input_ids.to(UpperCamelCase__ ), bbox=bbox.to(UpperCamelCase__ ), pixel_values=pixel_values.to(UpperCamelCase__ ), )
# verify the logits
lowerCAmelCase_ = torch.Size((1, 199, 768) )
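        # 199 = 2 text tokens + (224 // 16) ** 2 = 196 image patches + 1 CLS token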
self.assertEqual(outputs.last_hidden_state.shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1E-4 ) )
| 167
|
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
_A = logging.get_logger(__name__)
def __UpperCamelCase ( _A , _A , _A , _A=False ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
lowerCAmelCase_ = os.path.abspath(_A )
logger.info(f"Loading PyTorch weights from {pt_path}" )
lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' )
logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters." )
lowerCAmelCase_ = convert_pytorch_state_dict_to_flax(_A , _A )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
lowerCAmelCase_ = convert_pytorch_sharded_state_dict_to_flax(_A , _A )
return flax_state_dict
def __UpperCamelCase ( _A , _A , _A , _A , ):
def is_key_or_prefix_key_in_dict(_A ) -> bool:
return len(set(_A ) & {key, (model_prefix,) + key} ) > 0
# layer norm
lowerCAmelCase_ = pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_A ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
lowerCAmelCase_ = pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_A ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
lowerCAmelCase_ = pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_A ):
return renamed_pt_tuple_key, pt_tensor
# embedding
lowerCAmelCase_ = pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_A ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCAmelCase_ = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_A ):
lowerCAmelCase_ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCAmelCase_ = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_A ):
lowerCAmelCase_ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCAmelCase_ = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCAmelCase_ = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
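    # e.g. `conv.parametrizations.weight.original0` -> `conv.weight_g` and `...original1` -> `conv.weight_v`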
lowerCAmelCase_ = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
lowerCAmelCase_ = pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
lowerCAmelCase_ = pt_tuple_key[-2] + '''_v'''
if name is not None:
lowerCAmelCase_ = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __UpperCamelCase ( _A , _A ):
# convert pytorch tensor to numpy
lowerCAmelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()}
lowerCAmelCase_ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
lowerCAmelCase_ = flax_model.params['''params''']
else:
lowerCAmelCase_ = flax_model.params
lowerCAmelCase_ = flatten_dict(_A )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCAmelCase_ = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(_A )
lowerCAmelCase_ = {}
lowerCAmelCase_ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
lowerCAmelCase_ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
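    # first flag: the PT checkpoint carries a task head but the Flax model is the bare base model (strip the prefix);
    # second flag: the PT checkpoint is a bare base model but the Flax model has a head (add the prefix)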
    # Need to change some parameter names to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCAmelCase_ = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
lowerCAmelCase_ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCAmelCase_ = pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCAmelCase_ , lowerCAmelCase_ = rename_key_and_reshape_tensor(
_A , _A , _A , _A )
# add model prefix if necessary
lowerCAmelCase_ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCAmelCase_ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
lowerCAmelCase_ = jnp.asarray(_A )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_A , _A )
continue
# also add unexpected weight so that warning is thrown
lowerCAmelCase_ = jnp.asarray(_A )
else:
# also add unexpected weight so that warning is thrown
lowerCAmelCase_ = jnp.asarray(_A )
return unflatten_dict(_A )
def __UpperCamelCase ( _A , _A ):
import torch
# Load the index
lowerCAmelCase_ = {}
for shard_file in shard_filenames:
        # load each shard with torch.load (the shards are regular PyTorch checkpoints)
lowerCAmelCase_ = torch.load(_A )
lowerCAmelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()}
lowerCAmelCase_ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCAmelCase_ = flax_model.params['''params''']
lowerCAmelCase_ = flatten_dict(_A )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
lowerCAmelCase_ = flax_model.params
lowerCAmelCase_ = flatten_dict(_A )
lowerCAmelCase_ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
lowerCAmelCase_ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
    # Need to change some parameter names to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCAmelCase_ = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
lowerCAmelCase_ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCAmelCase_ = pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCAmelCase_ , lowerCAmelCase_ = rename_key_and_reshape_tensor(
_A , _A , _A , _A )
# add model prefix if necessary
lowerCAmelCase_ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCAmelCase_ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
lowerCAmelCase_ = jnp.asarray(_A )
continue
if "var" in flax_key[-1]:
lowerCAmelCase_ = jnp.asarray(_A )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_A , _A )
continue
# also add unexpected weight so that warning is thrown
lowerCAmelCase_ = jnp.asarray(_A )
else:
# also add unexpected weight so that warning is thrown
lowerCAmelCase_ = jnp.asarray(_A )
return unflatten_dict(_A )
def __UpperCamelCase ( _A , _A ):
lowerCAmelCase_ = os.path.abspath(_A )
logger.info(f"Loading Flax weights from {flax_checkpoint_path}" )
# import correct flax class
lowerCAmelCase_ = getattr(_A , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(_A , '''rb''' ) as state_f:
try:
lowerCAmelCase_ = from_bytes(_A , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(_A , _A )
def __UpperCamelCase ( _A , _A ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
    lowerCAmelCase_ = flatten_dict(jax.tree_util.tree_map(lambda _A : _A.dtype == jnp.bfloataa , _A ) ).values()
if any(_A ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
lowerCAmelCase_ = jax.tree_util.tree_map(
            lambda _A : _A.astype(np.floataa ) if _A.dtype == jnp.bfloataa else _A , _A )
lowerCAmelCase_ = flatten_dict(_A )
lowerCAmelCase_ = pt_model.state_dict()
lowerCAmelCase_ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
lowerCAmelCase_ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
lowerCAmelCase_ = []
lowerCAmelCase_ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowerCAmelCase_ = flax_key_tuple[0] == pt_model.base_model_prefix
lowerCAmelCase_ = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCAmelCase_ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
lowerCAmelCase_ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_A ) not in pt_model_dict:
# conv layer
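            # Flax conv kernels are (kH, kW, in_ch, out_ch); transpose back to PyTorch's (out_ch, in_ch, kH, kW)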
lowerCAmelCase_ = flax_key_tuple[:-1] + ('''weight''',)
lowerCAmelCase_ = jnp.transpose(_A , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_A ) not in pt_model_dict:
# linear layer
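            # transpose the (in_features, out_features) Flax kernel back to PyTorch's (out_features, in_features)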
lowerCAmelCase_ = flax_key_tuple[:-1] + ('''weight''',)
lowerCAmelCase_ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowerCAmelCase_ = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
lowerCAmelCase_ = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
lowerCAmelCase_ = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
lowerCAmelCase_ = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
lowerCAmelCase_ = '''.'''.join(_A )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
lowerCAmelCase_ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
lowerCAmelCase_ = key.split('''.''' )
lowerCAmelCase_ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
lowerCAmelCase_ = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
lowerCAmelCase_ = key_components[-2] + '''_v'''
if name is not None:
lowerCAmelCase_ = key_components[:-3] + [name]
lowerCAmelCase_ = '''.'''.join(_A )
lowerCAmelCase_ = key
if flax_key in special_pt_names:
lowerCAmelCase_ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
lowerCAmelCase_ = np.asarray(_A ) if not isinstance(_A , np.ndarray ) else flax_tensor
lowerCAmelCase_ = torch.from_numpy(_A )
# remove from missing keys
missing_keys.remove(_A )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_A )
pt_model.load_state_dict(_A )
# re-transform missing_keys to list
lowerCAmelCase_ = list(_A )
if len(_A ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n" )
if len(_A ) > 0:
logger.warning(
f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
''' use it for predictions and inference.''' )
else:
logger.warning(
f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
'''If your task is similar to the task the model of the checkpoint was trained on, '''
f"you can already use {pt_model.__class__.__name__} for predictions without further training." )
return pt_model
| 167
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ :Dict = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :Tuple = ["YolosFeatureExtractor"]
a_ :List[str] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :List[str] = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
a_ :Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 277
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a_ :Dict = logging.get_logger(__name__)
def lowercase_ (A : Optional[Any] , A : Any=False ):
snake_case__ : List[Any] = OrderedDict()
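    # Illustrative renames produced by the rules below (for a full SegFormer checkpoint):
    #   backbone.patch_embed1.proj.weight -> segformer.encoder.patch_embeddings.0.proj.weight
    #   backbone.block1.0.attn.q.weight   -> segformer.encoder.block.0.0.attention.self.query.weight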
for key, value in state_dict.items():
if encoder_only and not key.startswith('head' ):
snake_case__ : str = 'segformer.encoder.' + key
if key.startswith('backbone' ):
snake_case__ : str = key.replace('backbone' , 'segformer.encoder' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
snake_case__ : Optional[int] = key[key.find('patch_embed' ) + len('patch_embed' )]
snake_case__ : int = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(A )-1}''' )
if "norm" in key:
snake_case__ : Optional[int] = key.replace('norm' , 'layer_norm' )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
snake_case__ : Tuple = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )]
snake_case__ : Union[str, Any] = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(A )-1}''' )
if "layer_norm1" in key:
snake_case__ : List[Any] = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
snake_case__ : List[Any] = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
snake_case__ : List[Any] = key[key.find('block' ) + len('block' )]
snake_case__ : List[Any] = key.replace(F'''block{idx}''' , F'''block.{int(A )-1}''' )
if "attn.q" in key:
snake_case__ : int = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
snake_case__ : str = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
snake_case__ : Optional[int] = key.replace('attn' , 'attention.self' )
if "fc1" in key:
snake_case__ : str = key.replace('fc1' , 'dense1' )
if "fc2" in key:
snake_case__ : Dict = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
snake_case__ : Union[str, Any] = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
snake_case__ : List[str] = key.replace('linear_fuse.conv' , 'linear_fuse' )
snake_case__ : List[Any] = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
snake_case__ : Optional[int] = key[key.find('linear_c' ) + len('linear_c' )]
snake_case__ : Tuple = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(A )-1}''' )
if key.startswith('head' ):
snake_case__ : Tuple = key.replace('head' , 'classifier' )
snake_case__ : Optional[int] = value
return new_state_dict
def lowercase_ (A : Tuple , A : Optional[int] ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
snake_case__ : List[str] = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' )
snake_case__ : Optional[Any] = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
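            # kv_weight has shape (2 * hidden_size, hidden_size): the first hidden_size rows form the
            # key projection, the remaining rows the value projection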
snake_case__ : str = kv_weight[
: config.hidden_sizes[i], :
]
snake_case__ : Dict = kv_bias[: config.hidden_sizes[i]]
snake_case__ : List[str] = kv_weight[
config.hidden_sizes[i] :, :
]
snake_case__ : List[Any] = kv_bias[
config.hidden_sizes[i] :
]
def lowercase_ ():
snake_case__ : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case__ : Dict = Image.open(requests.get(A , stream=A ).raw )
return image
@torch.no_grad()
def lowercase_ (A : Any , A : Union[str, Any] , A : Optional[Any] ):
snake_case__ : List[str] = SegformerConfig()
snake_case__ : Dict = False
# set attributes based on model_name
snake_case__ : Optional[int] = 'huggingface/label-files'
if "segformer" in model_name:
snake_case__ : str = model_name[len('segformer.' ) : len('segformer.' ) + 2]
if "ade" in model_name:
snake_case__ : Optional[int] = 1_5_0
snake_case__ : int = 'ade20k-id2label.json'
snake_case__ : List[Any] = (1, 1_5_0, 1_2_8, 1_2_8)
elif "city" in model_name:
snake_case__ : str = 1_9
snake_case__ : List[str] = 'cityscapes-id2label.json'
snake_case__ : Optional[Any] = (1, 1_9, 1_2_8, 1_2_8)
else:
raise ValueError(F'''Model {model_name} not supported''' )
elif "mit" in model_name:
snake_case__ : str = True
snake_case__ : Union[str, Any] = model_name[4:6]
snake_case__ : Optional[Any] = 1_0_0_0
snake_case__ : Optional[int] = 'imagenet-1k-id2label.json'
snake_case__ : List[Any] = (1, 1_0_0_0)
else:
raise ValueError(F'''Model {model_name} not supported''' )
# set config attributes
snake_case__ : str = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) )
snake_case__ : List[Any] = {int(A ): v for k, v in idalabel.items()}
snake_case__ : Union[str, Any] = idalabel
snake_case__ : Tuple = {v: k for k, v in idalabel.items()}
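    # MiT backbone variants: b0 keeps the config defaults; b1-b5 share encoder hidden sizes [64, 128, 320, 512]
    # and differ in decoder width (256 for b1, 768 for b2-b5) and encoder depths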
if size == "b0":
pass
elif size == "b1":
snake_case__ : List[Any] = [6_4, 1_2_8, 3_2_0, 5_1_2]
snake_case__ : Tuple = 2_5_6
elif size == "b2":
snake_case__ : List[str] = [6_4, 1_2_8, 3_2_0, 5_1_2]
snake_case__ : int = 7_6_8
snake_case__ : List[Any] = [3, 4, 6, 3]
elif size == "b3":
snake_case__ : Optional[Any] = [6_4, 1_2_8, 3_2_0, 5_1_2]
snake_case__ : int = 7_6_8
snake_case__ : Optional[Any] = [3, 4, 1_8, 3]
elif size == "b4":
snake_case__ : str = [6_4, 1_2_8, 3_2_0, 5_1_2]
snake_case__ : Optional[Any] = 7_6_8
snake_case__ : Union[str, Any] = [3, 8, 2_7, 3]
elif size == "b5":
snake_case__ : List[str] = [6_4, 1_2_8, 3_2_0, 5_1_2]
snake_case__ : Optional[Any] = 7_6_8
snake_case__ : Any = [3, 6, 4_0, 3]
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor (only resize + normalize)
snake_case__ : Dict = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=A , align=A , do_random_crop=A )
# prepare image
snake_case__ : List[str] = prepare_img()
snake_case__ : Dict = image_processor(images=A , return_tensors='pt' ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
if encoder_only:
snake_case__ : Tuple = torch.load(A , map_location=torch.device('cpu' ) )
else:
snake_case__ : int = torch.load(A , map_location=torch.device('cpu' ) )['state_dict']
# rename keys
snake_case__ : List[Any] = rename_keys(A , encoder_only=A )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(A , A )
# create HuggingFace model and load state dict
if encoder_only:
snake_case__ : str = False
snake_case__ : List[Any] = SegformerForImageClassification(A )
else:
snake_case__ : Dict = SegformerForSemanticSegmentation(A )
model.load_state_dict(A )
model.eval()
# forward pass
snake_case__ : int = model(A )
snake_case__ : Any = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
snake_case__ : Dict = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
snake_case__ : Optional[int] = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
snake_case__ : List[Any] = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
snake_case__ : Union[str, Any] = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
snake_case__ : Dict = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
snake_case__ : List[Any] = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
snake_case__ : str = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
snake_case__ : Tuple = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
snake_case__ : Any = torch.tensor(
[
[
[-1.1_372e01, -1.2_787e01, -1.3_477e01],
[-1.2_536e01, -1.4_194e01, -1.4_409e01],
[-1.3_217e01, -1.4_888e01, -1.5_327e01],
],
[
[-1.4_791e01, -1.7_122e01, -1.8_277e01],
[-1.7_163e01, -1.9_192e01, -1.9_533e01],
[-1.7_897e01, -1.9_991e01, -2.0_315e01],
],
[
[7.6_723e-01, 4.1_921e-01, -7.7_878e-02],
[4.7_772e-01, 9.5_557e-03, -2.8_082e-01],
[3.6_032e-01, -2.4_826e-01, -5.1_168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
snake_case__ : Optional[int] = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
snake_case__ : Union[str, Any] = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
snake_case__ : List[str] = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
snake_case__ : List[Any] = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
snake_case__ : str = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
snake_case__ : List[str] = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
snake_case__ : Tuple = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , A , atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(A ).mkdir(exist_ok=A )
model.save_pretrained(A )
image_processor.save_pretrained(A )
if __name__ == "__main__":
a_ :Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="segformer.b0.512x512.ade.160k",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
a_ :Union[str, Any] = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 277
| 1
|
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
__lowerCAmelCase : List[Any] ="The Nymphenburg Palace is a beautiful palace in Munich!"
def UpperCamelCase ( _lowerCamelCase : str , _lowerCamelCase : str ):
A__ = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 10_24,
"hidden_size": 7_68,
"max_length": 5_12,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 10_24,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1e-5,
"token_type_vocab_size": 2,
}
A__ = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
A__ = BERTEncoder(
attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=_lowerCamelCase , output_all_encodings=_lowerCamelCase , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , _lowerCamelCase ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
A__ = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
A__ = os.path.join(get_home_dir() , "models" )
A__ = _load_vocab(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , cls=_lowerCamelCase )
A__ = nlp.model.BERTModel(
_lowerCamelCase , len(_lowerCamelCase ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=_lowerCamelCase , use_token_type_embed=_lowerCamelCase , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=_lowerCamelCase , use_decoder=_lowerCamelCase , )
original_bort.load_parameters(_lowerCamelCase , cast_dtype=_lowerCamelCase , ignore_extra=_lowerCamelCase )
A__ = original_bort._collect_params_with_prefix()
# Build our config 🤗
A__ = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.0_2,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(_lowerCamelCase ),
}
A__ = BertConfig.from_dict(_lowerCamelCase )
A__ = BertForMaskedLM(_lowerCamelCase )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(_lowerCamelCase : List[Any] ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict ):
A__ = hf_param.shape
A__ = to_torch(params[gluon_param] )
A__ = gluon_param.shape
assert (
shape_hf == shape_gluon
), F"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
return gluon_param
A__ = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
A__ = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
A__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
A__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
A__ = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
A__ = hf_bort_model.bert.encoder.layer[i]
# self attention
A__ = layer.attention.self
A__ = check_and_map_params(
self_attn.key.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" )
A__ = check_and_map_params(
self_attn.key.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" )
A__ = check_and_map_params(
self_attn.query.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" )
A__ = check_and_map_params(
self_attn.query.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" )
A__ = check_and_map_params(
self_attn.value.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" )
A__ = check_and_map_params(
self_attn.value.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" )
# self attention output
A__ = layer.attention.output
A__ = check_and_map_params(
self_output.dense.bias , F"encoder.transformer_cells.{i}.proj.bias" )
A__ = check_and_map_params(
self_output.dense.weight , F"encoder.transformer_cells.{i}.proj.weight" )
A__ = check_and_map_params(
self_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.layer_norm.beta" )
A__ = check_and_map_params(
self_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.layer_norm.gamma" )
# intermediate
A__ = layer.intermediate
A__ = check_and_map_params(
intermediate.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_1.bias" )
A__ = check_and_map_params(
intermediate.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_1.weight" )
# output
A__ = layer.output
A__ = check_and_map_params(
bert_output.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_2.bias" )
A__ = check_and_map_params(
bert_output.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_2.weight" )
A__ = check_and_map_params(
bert_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.ffn.layer_norm.beta" )
A__ = check_and_map_params(
bert_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
A__ = RobertaTokenizer.from_pretrained("roberta-base" )
A__ = tokenizer.encode_plus(_lowerCamelCase )["input_ids"]
# Get gluon output
A__ = mx.nd.array([input_ids] )
A__ = original_bort(inputs=_lowerCamelCase , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(_lowerCamelCase )
A__ = BertModel.from_pretrained(_lowerCamelCase )
hf_bort_model.eval()
A__ = tokenizer.encode_plus(_lowerCamelCase , return_tensors="pt" )
A__ = hf_bort_model(**_lowerCamelCase )[0]
A__ = output_gluon[0].asnumpy()
A__ = output_hf[0].detach().numpy()
A__ = np.max(np.abs(hf_layer - gluon_layer ) ).item()
A__ = np.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
if success:
print("✔️ Both model do output the same tensors" )
else:
print("❌ Both model do **NOT** output the same tensors" )
print("Absolute difference is:" , _lowerCamelCase )
if __name__ == "__main__":
__lowerCAmelCase : Any =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : Tuple =parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 371
|
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__lowerCAmelCase : Optional[Any] ="\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__lowerCAmelCase : Union[str, Any] ="\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
__lowerCAmelCase : str ="\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer( text ):
    def remove_articles(text ):
        regex = re.compile(r"\b(a|an|the)\b" , re.UNICODE )
        return re.sub(regex , " " , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(text ) ) ) )
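# Illustrative behaviour of the normalizer above (a sketch; the input is a
# made-up example):
# >>> normalize_answer("The, Quick fox!")
# 'quick fox'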
def compute_exact( a_gold , a_pred ):
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_em( predictions , references ):
    scores = [any(compute_exact(ref , pred ) for ref in refs ) for pred, refs in zip(predictions , references )]
    return (sum(scores ) / len(scores )) * 1_00
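# Consistent with the docstring example above: the prediction "About 95 you now
# get in ." does not match its reference after normalization, so the
# corpus-level exact score is 0.0 (on the 0-100 scale returned here).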
def SARIngram( sgrams , cgrams , rgramslist , numref ):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall )
    sgramcounter = Counter(sgrams )
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref
    cgramcounter = Counter(cgrams )
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref
# KEEP
A__ = sgramcounter_rep & cgramcounter_rep
A__ = keepgramcounter_rep & rgramcounter
A__ = sgramcounter_rep & rgramcounter
A__ = 0
A__ = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
A__ = 1
A__ = 1
if len(_lowerCamelCase ) > 0:
A__ = keeptmpscorea / len(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
A__ = keeptmpscorea / sum(keepgramcounterall_rep.values() )
A__ = 0
if keepscore_precision > 0 or keepscore_recall > 0:
A__ = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
A__ = sgramcounter_rep - cgramcounter_rep
A__ = delgramcounter_rep - rgramcounter
A__ = sgramcounter_rep - rgramcounter
A__ = 0
A__ = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
A__ = 1
if len(_lowerCamelCase ) > 0:
A__ = deltmpscorea / len(_lowerCamelCase )
# ADDITION
A__ = set(_lowerCamelCase ) - set(_lowerCamelCase )
A__ = set(_lowerCamelCase ) & set(_lowerCamelCase )
A__ = set(_lowerCamelCase ) - set(_lowerCamelCase )
A__ = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
A__ = 1
A__ = 1
if len(_lowerCamelCase ) > 0:
A__ = addtmpscore / len(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
A__ = addtmpscore / len(_lowerCamelCase )
A__ = 0
if addscore_precision > 0 or addscore_recall > 0:
A__ = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def SARIsent( ssent , csent , rsents ):
    numref = len(rsents )
A__ = ssent.split(" " )
A__ = csent.split(" " )
A__ = []
A__ = []
A__ = []
A__ = []
A__ = []
A__ = []
A__ = []
A__ = []
A__ = []
A__ = []
for rsent in rsents:
A__ = rsent.split(" " )
A__ = []
A__ = []
A__ = []
ragramslist.append(_lowerCamelCase )
for i in range(0 , len(_lowerCamelCase ) - 1 ):
if i < len(_lowerCamelCase ) - 1:
A__ = ragrams[i] + " " + ragrams[i + 1]
ragrams.append(_lowerCamelCase )
if i < len(_lowerCamelCase ) - 2:
A__ = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2]
ragrams.append(_lowerCamelCase )
if i < len(_lowerCamelCase ) - 3:
A__ = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3]
ragrams.append(_lowerCamelCase )
ragramslist.append(_lowerCamelCase )
ragramslist.append(_lowerCamelCase )
ragramslist.append(_lowerCamelCase )
for i in range(0 , len(_lowerCamelCase ) - 1 ):
if i < len(_lowerCamelCase ) - 1:
A__ = sagrams[i] + " " + sagrams[i + 1]
sagrams.append(_lowerCamelCase )
if i < len(_lowerCamelCase ) - 2:
A__ = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2]
sagrams.append(_lowerCamelCase )
if i < len(_lowerCamelCase ) - 3:
A__ = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3]
sagrams.append(_lowerCamelCase )
for i in range(0 , len(_lowerCamelCase ) - 1 ):
if i < len(_lowerCamelCase ) - 1:
A__ = cagrams[i] + " " + cagrams[i + 1]
cagrams.append(_lowerCamelCase )
if i < len(_lowerCamelCase ) - 2:
A__ = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2]
cagrams.append(_lowerCamelCase )
if i < len(_lowerCamelCase ) - 3:
A__ = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3]
cagrams.append(_lowerCamelCase )
    ((keep1score), (del1score), (add1score)) = SARIngram(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
    ((keep2score), (del2score), (add2score)) = SARIngram(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
    ((keep3score), (del3score), (add3score)) = SARIngram(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
    ((keep4score), (del4score), (add4score)) = SARIngram(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score] ) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score] ) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score] ) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
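# Note: the per-sentence score above is on a 0-1 scale; `compute_sari` further
# below averages it over the corpus and multiplies by 100, matching the
# docstring example.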
def normalize( sentence , lowercase : bool = True , tokenizer : str = "13a" , return_str : bool = True ):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though the Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__ ).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer )()(sentence )
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence )
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence , return_str=True , escape=False )
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence , return_str=True )
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
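# Illustrative behaviour (a sketch assuming sacrebleu's default "13a"
# tokenizer, which splits punctuation off words; the output shown is for
# exposition only):
# >>> normalize("Hello, world!")
# 'hello , world !'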
def compute_sari( sources , predictions , references ):
    if not (len(sources ) == len(predictions ) == len(references )):
        raise ValueError("Sources length must match predictions and references lengths." )
    sari_score = 0
    for src, pred, refs in zip(sources , predictions , references ):
        sari_score += SARIsent(normalize(src ) , normalize(pred ) , [normalize(sent ) for sent in refs] )
    sari_score = sari_score / len(predictions )
    return 1_00 * sari_score
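# With the docstring example above (one source, one prediction, one reference),
# this returns roughly 21.8055, matching the documented 'sari' value.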
def compute_sacrebleu( predictions , references , smooth_method="exp" , smooth_value=None , force=False , lowercase=False , use_effective_order=False , ):
    references_per_prediction = len(references[0] )
    if any(len(refs ) != references_per_prediction for refs in references ):
        raise ValueError("Sacrebleu requires the same number of references for each prediction" )
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
    output = sacrebleu.corpus_bleu(
        predictions , transformed_references , smooth_method=smooth_method , smooth_value=smooth_value , force=force , lowercase=lowercase , use_effective_order=use_effective_order , )
    return output.score
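# For the same docstring example, the corpus BLEU computed here is roughly
# 14.5358, matching the documented 'sacrebleu' value.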
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
def UpperCAmelCase_ ( self :Any )-> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=[
"https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
"https://github.com/cocoxu/simplification/blob/master/SARI.py",
"https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
"https://github.com/mjpost/sacreBLEU",
] , reference_urls=[
"https://www.aclweb.org/anthology/Q16-1029.pdf",
"https://github.com/mjpost/sacreBLEU",
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
def UpperCAmelCase_ ( self :str , lowercase_ :Dict , lowercase_ :List[Any] , lowercase_ :int )-> int:
        result = {}
result.update({"sari": compute_sari(sources=lowercase_ , predictions=lowercase_ , references=lowercase_ )} )
result.update({"sacrebleu": compute_sacrebleu(predictions=lowercase_ , references=lowercase_ )} )
result.update({"exact": compute_em(predictions=lowercase_ , references=lowercase_ )} )
return result
| 123
| 0
|
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case =get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : Union[str, Any] = XLMRobertaTokenizer
lowerCamelCase : Dict = XLMRobertaTokenizerFast
lowerCamelCase : str = True
lowerCamelCase : Optional[int] = True
def __UpperCAmelCase ( self : Dict ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase = XLMRobertaTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
lowerCAmelCase = '<pad>'
lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(UpperCAmelCase__ ) , 1_0_0_2 )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_2 )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
lowerCAmelCase = XLMRobertaTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
lowerCAmelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowerCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCAmelCase = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCAmelCase = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def __UpperCAmelCase ( self : List[Any] ) -> str:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCAmelCase = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.save_pretrained(UpperCAmelCase__ )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
lowerCAmelCase = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.from_pretrained(UpperCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCAmelCase__ )
# Save tokenizer rust, legacy_format=True
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(UpperCAmelCase__ , legacy_format=UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.save_pretrained(UpperCAmelCase__ )
                # Checks it saves with the same files
self.assertSequenceEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.from_pretrained(UpperCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
shutil.rmtree(UpperCAmelCase__ )
# Save tokenizer rust, legacy_format=False
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(UpperCAmelCase__ , legacy_format=UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.save_pretrained(UpperCAmelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.from_pretrained(UpperCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
shutil.rmtree(UpperCAmelCase__ )
@cached_property
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(UpperCAmelCase__ , f.name )
lowerCAmelCase = XLMRobertaTokenizer(f.name , keep_accents=UpperCAmelCase__ )
lowerCAmelCase = pickle.dumps(UpperCAmelCase__ )
pickle.loads(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
if not self.test_rust_tokenizer:
return
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = 'I was born in 92000, and this is falsé.'
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase__ )
lowerCAmelCase = rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = tokenizer.encode(UpperCAmelCase__ )
lowerCAmelCase = rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
lowerCAmelCase = 'Hello World!'
lowerCAmelCase = [0, 3_5_3_7_8, 6_6_6_1, 3_8, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@slow
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
lowerCAmelCase = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
lowerCAmelCase = [
0,
3_2_9_3,
8_3,
1_0,
4_5_5_2,
4_9_8_9,
7_9_8_6,
6_7_8,
1_0,
5_9_1_5,
1_1_1,
1_7_9_4_5_9,
1_2_4_8_5_0,
4,
6_0_4_4,
2_3_7,
1_2,
6,
5,
6,
4,
6_7_8_0,
7_0_5,
1_5,
1_3_8_8,
4_4,
3_7_8,
1_0_1_1_4,
7_1_1,
1_5_2,
2_0,
6,
5,
2_2_3_7_6,
6_4_2,
1_2_2_1,
1_5_1_9_0,
3_4_1_5_3,
4_5_0,
5_6_0_8,
9_5_9,
1_1_1_9,
5_7_7_0_2,
1_3_6,
1_8_6,
4_7,
1_0_9_8,
2_9_3_6_7,
4_7,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_0_4_4,
2_3_7,
6_2_8_4,
5_0_9_0_1,
5_2_8,
3_1,
9_0,
3_4,
9_2_7,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@slow
def __UpperCAmelCase ( self : str ) -> Tuple:
# fmt: off
lowerCAmelCase = {'input_ids': [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
| 4
|
'''simple docstring'''
from ....utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)
class lowercase_ ( a__ ):
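    # Copies every attribute of the wrapped text config onto this object, records
    # the hidden size of the non-text (modal) encoder, and lets an explicit
    # `num_labels` override the value inherited from the wrapped config.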
    def __init__( self , config , num_labels=None , modal_hidden_size=20_48 ):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 80
| 0
|
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
UpperCAmelCase_ : Optional[Any] = get_logger(__name__)
UpperCAmelCase_ : Union[str, Any] = R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class UpperCamelCase :
@add_start_docstrings(UpperCAmelCase__ )
def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ ):
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class UpperCamelCase :
@add_start_docstrings(UpperCAmelCase__ )
def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ ):
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class UpperCamelCase ( _UpperCAmelCase ):
@add_start_docstrings(UpperCAmelCase__ )
def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ):
for processor in self:
A__ = inspect.signature(processor.__call__ ).parameters
if len(UpperCAmelCase__ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F"""Make sure that all the required parameters: {list(function_args.keys() )} for """
F"""{processor.__class__} are passed to the logits processor.""" )
A__ = processor(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ )
else:
A__ = processor(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return scores
class UpperCamelCase ( _UpperCAmelCase ):
    def __init__( self , temperature ):
        if not isinstance(temperature , float ) or not (temperature > 0):
            raise ValueError(F"""`temperature` has to be a strictly positive float, but is {temperature}""" )
        self.temperature = temperature
    def __call__( self , input_ids , scores , cur_len ):
        scores = scores / self.temperature
        return scores
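# Worked example (a sketch): with temperature 2.0, logits [2.0, 0.0] are
# rescaled to [1.0, 0.0], flattening the softmax; temperatures below 1.0
# sharpen it instead.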
class UpperCamelCase ( _UpperCAmelCase ):
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ = -float("Inf" ) , UpperCAmelCase__ = 1 ):
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" )
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or (min_tokens_to_keep < 1):
raise ValueError(F"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" )
A__ = top_p
A__ = filter_value
A__ = min_tokens_to_keep
def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
A__ , A__ = lax.top_k(UpperCAmelCase__ , scores.shape[-1] )
A__ = jnp.full_like(UpperCAmelCase__ , self.filter_value )
A__ = jax.nn.softmax(UpperCAmelCase__ , axis=-1 ).cumsum(axis=-1 )
A__ = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
A__ = jnp.roll(UpperCAmelCase__ , 1 )
score_mask |= score_mask.at[:, 0].set(UpperCAmelCase__ )
# min tokens to keep
A__ = score_mask.at[:, : self.min_tokens_to_keep].set(UpperCAmelCase__ )
A__ = jnp.where(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
A__ = jax.lax.sort_key_val(UpperCAmelCase__ , UpperCAmelCase__ )[-1]
return next_scores
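# Worked example (a sketch): with top_p=0.9, if the softmax of the sorted
# scores is [0.5, 0.3, 0.15, 0.05], the cumulative sums are [0.5, 0.8, 0.95,
# 1.0]; the mask keeps the first three tokens (the roll above also keeps the
# token that crosses the threshold), and all remaining scores are set to the
# filter value.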
class UpperCamelCase ( _UpperCAmelCase ):
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ = -float("Inf" ) , UpperCAmelCase__ = 1 ):
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or top_k <= 0:
raise ValueError(F"""`top_k` has to be a strictly positive integer, but is {top_k}""" )
A__ = max(UpperCAmelCase__ , UpperCAmelCase__ )
A__ = filter_value
def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
A__ , A__ = scores.shape
A__ = jnp.full(batch_size * vocab_size , self.filter_value )
A__ = min(self.top_k , scores.shape[-1] ) # Safety check
A__ , A__ = lax.top_k(UpperCAmelCase__ , UpperCAmelCase__ )
A__ = jnp.broadcast_to((jnp.arange(UpperCAmelCase__ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
A__ = topk_scores.flatten()
A__ = topk_indices.flatten() + shift
A__ = next_scores_flat.at[topk_indices_flat].set(UpperCAmelCase__ )
A__ = next_scores_flat.reshape(UpperCAmelCase__ , UpperCAmelCase__ )
return next_scores
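# Worked example (a sketch): with top_k=2 and logits [1.0, 3.0, 2.0, 0.5], only
# the scores at indices 1 and 2 survive; every other position is filled with
# the filter value (-inf by default).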
class UpperCamelCase ( _UpperCAmelCase ):
def __init__( self , UpperCAmelCase__ ):
A__ = bos_token_id
def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
A__ = jnp.full(scores.shape , -float("inf" ) )
A__ = 1 - jnp.bool_(cur_len - 1 )
A__ = jnp.where(UpperCAmelCase__ , new_scores.at[:, self.bos_token_id].set(0 ) , UpperCAmelCase__ )
return scores
class UpperCamelCase ( _UpperCAmelCase ):
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ ):
A__ = max_length
A__ = eos_token_id
def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
A__ = jnp.full(scores.shape , -float("inf" ) )
A__ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
A__ = jnp.where(UpperCAmelCase__ , new_scores.at[:, self.eos_token_id].set(0 ) , UpperCAmelCase__ )
return scores
class UpperCamelCase ( _UpperCAmelCase ):
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ ):
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or min_length < 0:
raise ValueError(F"""`min_length` has to be a positive integer, but is {min_length}""" )
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or eos_token_id < 0:
raise ValueError(F"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" )
A__ = min_length
A__ = eos_token_id
def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
# create boolean flag to decide if min length penalty should be applied
A__ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
A__ = jnp.where(UpperCAmelCase__ , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , UpperCAmelCase__ )
        return scores
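# Worked example (a sketch): with min_length=5 and cur_len=3,
# clip(3 - 5, 0, 1) == 0, so the flag is 1 and the EOS logit is forced to -inf;
# the penalty is lifted only once cur_len exceeds min_length.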
class UpperCamelCase ( _UpperCAmelCase ):
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ ):
A__ = list(UpperCAmelCase__ )
A__ = begin_index
def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
A__ = 1 - jnp.bool_(cur_len - self.begin_index )
A__ = jnp.where(UpperCAmelCase__ , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , UpperCAmelCase__ )
return scores
class UpperCamelCase ( _UpperCAmelCase ):
def __init__( self , UpperCAmelCase__ ):
A__ = list(UpperCAmelCase__ )
def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
A__ = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class UpperCamelCase ( _UpperCAmelCase ):
def __init__( self , UpperCAmelCase__ ):
A__ = dict(UpperCAmelCase__ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
        A__ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
for index, token in force_token_map.items():
if token is not None:
A__ = force_token_array.at[index].set(UpperCAmelCase__ )
        A__ = jnp.int32(UpperCAmelCase__ )
def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
def _force_token(UpperCAmelCase__ ):
A__ = scores.shape[0]
A__ = self.force_token_array[generation_idx]
A__ = jnp.ones_like(UpperCAmelCase__ , dtype=scores.dtype ) * -float("inf" )
A__ = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
A__ = lax.dynamic_update_slice(UpperCAmelCase__ , UpperCAmelCase__ , (0, current_token) )
return new_scores
A__ = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(UpperCAmelCase__ ) , lambda: scores , ) , )
return scores
class UpperCamelCase ( _UpperCAmelCase ):
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
A__ = generate_config.eos_token_id
A__ = generate_config.no_timestamps_token_id
A__ = generate_config.no_timestamps_token_id + 1
A__ = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(UpperCAmelCase__ , "max_initial_timestamp_index" ):
A__ = generate_config.max_initial_timestamp_index
else:
A__ = model_config.vocab_size
if self.max_initial_timestamp_index is None:
A__ = model_config.vocab_size
def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
# suppress <|notimestamps|> which is handled by without_timestamps
A__ = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(UpperCAmelCase__ , UpperCAmelCase__ ):
A__ = jnp.where((cur_len - self.begin_index) >= 1 , UpperCAmelCase__ , UpperCAmelCase__ )
A__ = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , UpperCAmelCase__ , )
A__ = jnp.where((cur_len - self.begin_index) < 2 , UpperCAmelCase__ , UpperCAmelCase__ )
A__ = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , UpperCAmelCase__ , UpperCAmelCase__ , )
return jnp.where(
UpperCAmelCase__ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , UpperCAmelCase__ , )
A__ = jax.vmap(UpperCAmelCase__ )(UpperCAmelCase__ , UpperCAmelCase__ )
A__ = jnp.where(cur_len == self.begin_index , UpperCAmelCase__ , UpperCAmelCase__ )
A__ = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , UpperCAmelCase__ , )
A__ = self.timestamp_begin + self.max_initial_timestamp_index
A__ = jnp.where(
UpperCAmelCase__ , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , UpperCAmelCase__ , )
# if sum of probability over timestamps is above any other token, sample timestamp
A__ = jax.nn.log_softmax(UpperCAmelCase__ , axis=-1 )
def handle_cumulative_probs(UpperCAmelCase__ , UpperCAmelCase__ ):
A__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
A__ = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , UpperCAmelCase__ , )
A__ = jax.vmap(UpperCAmelCase__ )(UpperCAmelCase__ , UpperCAmelCase__ )
return scores
| 198
|
import datasets
from .evaluate import evaluate
UpperCAmelCase_ : List[Any] = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
UpperCAmelCase_ : Any = "\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
UpperCAmelCase_ : Tuple = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def __A ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ ):
A__ = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
A__ = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
A__ = evaluate(dataset=UpperCAmelCase__ , predictions=UpperCAmelCase__ )
return score
| 198
| 1
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def replace_key_with_offset( key : str , offset : int , original_name : str , new_name : str ) ->str:
    to_find = original_name.split(""".""" )[0]
    key_list = key.split(""".""" )
    orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
    layer_num = int(key_list[key_list.index(to_find ) - 1] )
    new_block_num = orig_block_num - offset
    key = key.replace(F'{orig_block_num}.{layer_num}.{original_name}' , F'block.{new_block_num}.{layer_num}.{new_name}' )
    return key
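# Illustrative example (hypothetical key, for exposition only): with
# original_name="mlp.fc1", new_name="output.conv1" and offset=1, the key
# "poolformer.encoder.2.1.mlp.fc1.weight" is parsed as block 2 / layer 1 and
# rewritten to "poolformer.encoder.block.1.1.output.conv1.weight".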
def lowerCamelCase ( __lowerCamelCase : Optional[Any] ) ->Any:
_SCREAMING_SNAKE_CASE = OrderedDict()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0, 0
for key, value in state_dict.items():
if key.startswith("""network""" ):
_SCREAMING_SNAKE_CASE = key.replace("""network""" , """poolformer.encoder""" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("""bias""" ) and "patch_embed" not in key:
patch_emb_offset += 1
_SCREAMING_SNAKE_CASE = key[: key.find("""proj""" )]
_SCREAMING_SNAKE_CASE = key.replace(__lowerCamelCase , F'patch_embeddings.{total_embed_found}.' )
_SCREAMING_SNAKE_CASE = key.replace("""proj""" , """projection""" )
if key.endswith("""bias""" ):
total_embed_found += 1
if "patch_embeddings" in key:
_SCREAMING_SNAKE_CASE = """poolformer.encoder.""" + key
if "mlp.fc1" in key:
_SCREAMING_SNAKE_CASE = replace_key_with_offset(__lowerCamelCase , __lowerCamelCase , """mlp.fc1""" , """output.conv1""" )
if "mlp.fc2" in key:
_SCREAMING_SNAKE_CASE = replace_key_with_offset(__lowerCamelCase , __lowerCamelCase , """mlp.fc2""" , """output.conv2""" )
if "norm1" in key:
_SCREAMING_SNAKE_CASE = replace_key_with_offset(__lowerCamelCase , __lowerCamelCase , """norm1""" , """before_norm""" )
if "norm2" in key:
_SCREAMING_SNAKE_CASE = replace_key_with_offset(__lowerCamelCase , __lowerCamelCase , """norm2""" , """after_norm""" )
if "layer_scale_1" in key:
_SCREAMING_SNAKE_CASE = replace_key_with_offset(__lowerCamelCase , __lowerCamelCase , """layer_scale_1""" , """layer_scale_1""" )
if "layer_scale_2" in key:
_SCREAMING_SNAKE_CASE = replace_key_with_offset(__lowerCamelCase , __lowerCamelCase , """layer_scale_2""" , """layer_scale_2""" )
if "head" in key:
_SCREAMING_SNAKE_CASE = key.replace("""head""" , """classifier""" )
_SCREAMING_SNAKE_CASE = value
return new_state_dict
def lowerCamelCase ( ) ->int:
_SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_SCREAMING_SNAKE_CASE = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
return image
@torch.no_grad()
def lowerCamelCase ( __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : int ) ->Optional[Any]:
_SCREAMING_SNAKE_CASE = PoolFormerConfig()
# set attributes based on model_name
_SCREAMING_SNAKE_CASE = """huggingface/label-files"""
_SCREAMING_SNAKE_CASE = model_name[-3:]
_SCREAMING_SNAKE_CASE = 1000
_SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json"""
_SCREAMING_SNAKE_CASE = (1, 1000)
# set config attributes
_SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
_SCREAMING_SNAKE_CASE = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE = idalabel
_SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
if size == "s12":
_SCREAMING_SNAKE_CASE = [2, 2, 6, 2]
_SCREAMING_SNAKE_CASE = [64, 128, 320, 512]
_SCREAMING_SNAKE_CASE = 4.0
_SCREAMING_SNAKE_CASE = 0.9
elif size == "s24":
_SCREAMING_SNAKE_CASE = [4, 4, 12, 4]
_SCREAMING_SNAKE_CASE = [64, 128, 320, 512]
_SCREAMING_SNAKE_CASE = 4.0
_SCREAMING_SNAKE_CASE = 0.9
elif size == "s36":
_SCREAMING_SNAKE_CASE = [6, 6, 18, 6]
_SCREAMING_SNAKE_CASE = [64, 128, 320, 512]
_SCREAMING_SNAKE_CASE = 4.0
_SCREAMING_SNAKE_CASE = 1e-6
_SCREAMING_SNAKE_CASE = 0.9
elif size == "m36":
_SCREAMING_SNAKE_CASE = [6, 6, 18, 6]
_SCREAMING_SNAKE_CASE = [96, 192, 384, 768]
_SCREAMING_SNAKE_CASE = 4.0
_SCREAMING_SNAKE_CASE = 1e-6
_SCREAMING_SNAKE_CASE = 0.95
elif size == "m48":
_SCREAMING_SNAKE_CASE = [8, 8, 24, 8]
_SCREAMING_SNAKE_CASE = [96, 192, 384, 768]
_SCREAMING_SNAKE_CASE = 4.0
_SCREAMING_SNAKE_CASE = 1e-6
_SCREAMING_SNAKE_CASE = 0.95
else:
raise ValueError(F'Size {size} not supported' )
# load image processor
_SCREAMING_SNAKE_CASE = PoolFormerImageProcessor(crop_pct=__lowerCamelCase )
# Prepare image
_SCREAMING_SNAKE_CASE = prepare_img()
_SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="""pt""" ).pixel_values
logger.info(F'Converting model {model_name}...' )
# load original state dict
_SCREAMING_SNAKE_CASE = torch.load(__lowerCamelCase , map_location=torch.device("""cpu""" ) )
# rename keys
_SCREAMING_SNAKE_CASE = rename_keys(__lowerCamelCase )
# create HuggingFace model and load state dict
_SCREAMING_SNAKE_CASE = PoolFormerForImageClassification(__lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
model.eval()
# Define image processor
_SCREAMING_SNAKE_CASE = PoolFormerImageProcessor(crop_pct=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = image_processor(images=prepare_img() , return_tensors="""pt""" ).pixel_values
# forward pass
_SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = outputs.logits
# define expected logit slices for different models
if size == "s12":
_SCREAMING_SNAKE_CASE = torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
_SCREAMING_SNAKE_CASE = torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
_SCREAMING_SNAKE_CASE = torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
_SCREAMING_SNAKE_CASE = torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
_SCREAMING_SNAKE_CASE = torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(F'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1e-2 )
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
lowercase_ = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
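# Example invocation (a sketch; the script file name and both paths are
# hypothetical placeholders):
#   python convert_poolformer_original_to_pytorch.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path /path/to/poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path /path/to/output_dir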
| 58
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class __snake_case :
def __init__( self ,snake_case ,):
'''simple docstring'''
lowercase : Any = parent
lowercase : Tuple = 13
lowercase : str = 7
lowercase : Dict = True
lowercase : Dict = True
lowercase : str = True
lowercase : List[str] = True
lowercase : int = True
lowercase : Union[str, Any] = False
lowercase : Dict = False
lowercase : List[Any] = False
lowercase : List[Any] = 2
lowercase : Optional[Any] = 99
lowercase : int = 0
lowercase : Tuple = 32
lowercase : int = 2
lowercase : Tuple = 4
lowercase : List[Any] = 0.1
lowercase : Tuple = 0.1
lowercase : List[Any] = 512
lowercase : int = 16
lowercase : Dict = 2
lowercase : int = 0.02
lowercase : Union[str, Any] = 3
lowercase : Any = 4
lowercase : List[Any] = """last"""
lowercase : Tuple = True
lowercase : List[Any] = None
lowercase : Any = 0
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        lowercase : List[str] = random_attention_mask([self.batch_size, self.seq_length] ,dtype=tf.float32 )
lowercase : Tuple = None
if self.use_input_lengths:
lowercase : List[str] = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase : Tuple = None
if self.use_token_type_ids:
lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
lowercase : List[str] = None
lowercase : List[str] = None
lowercase : Optional[Any] = None
if self.use_labels:
lowercase : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            lowercase : str = ids_tensor([self.batch_size] ,2 ,dtype=tf.float32 )
lowercase : Optional[Any] = ids_tensor([self.batch_size] ,self.num_choices )
lowercase : str = FlaubertConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,bos_token_id=self.bos_token_id ,)
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,):
'''simple docstring'''
lowercase : Tuple = TFFlaubertModel(config=snake_case )
lowercase : str = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
lowercase : Optional[Any] = model(snake_case )
lowercase : List[Any] = [input_ids, input_mask]
lowercase : int = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,):
'''simple docstring'''
lowercase : List[Any] = TFFlaubertWithLMHeadModel(snake_case )
lowercase : Optional[Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
lowercase : int = model(snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,):
'''simple docstring'''
lowercase : Tuple = TFFlaubertForQuestionAnsweringSimple(snake_case )
lowercase : Union[str, Any] = {"""input_ids""": input_ids, """lengths""": input_lengths}
lowercase : Tuple = model(snake_case )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,):
'''simple docstring'''
lowercase : Union[str, Any] = TFFlaubertForSequenceClassification(snake_case )
lowercase : str = {"""input_ids""": input_ids, """lengths""": input_lengths}
lowercase : str = model(snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,):
'''simple docstring'''
lowercase : Any = self.num_labels
lowercase : List[str] = TFFlaubertForTokenClassification(config=snake_case )
lowercase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowercase : int = model(snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,):
'''simple docstring'''
lowercase : Any = self.num_choices
lowercase : Dict = TFFlaubertForMultipleChoice(config=snake_case )
lowercase : Any = tf.tile(tf.expand_dims(snake_case ,1 ) ,(1, self.num_choices, 1) )
lowercase : Optional[Any] = tf.tile(tf.expand_dims(snake_case ,1 ) ,(1, self.num_choices, 1) )
lowercase : Dict = tf.tile(tf.expand_dims(snake_case ,1 ) ,(1, self.num_choices, 1) )
lowercase : Union[str, Any] = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
lowercase : int = model(snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Any = self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : int = config_and_inputs
lowercase : List[str] = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""langs""": token_type_ids,
"""lengths""": input_lengths,
}
return config, inputs_dict
@require_tf
class __snake_case ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
_a : Dict= (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
_a : Optional[Any]= (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_a : Any= (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
_a : Tuple= False
_a : int= False
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = TFFlaubertModelTester(self )
lowercase : List[Any] = ConfigTester(self ,config_class=snake_case ,emb_dim=37 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*snake_case )
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Dict = TFFlaubertModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_tf
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
lowercase : int = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]] ,dtype=tf.int32 ,) # "J'aime flaubert !"
lowercase : Dict = model(snake_case )[0]
lowercase : Union[str, Any] = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape ,snake_case )
# compare the actual values for a slice.
lowercase : Tuple = tf.convert_to_tensor(
[
[
[-1.8_768_773, -1.566_555, 0.27_072_418],
[-1.6_920_038, -0.5_873_505, 1.9_329_599],
[-2.9_563_985, -1.6_993_835, 1.7_972_052],
]
            ] ,dtype=tf.float32 ,)
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
| 20
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None,
):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
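# Added illustration (not part of the original test file): the default attention
# mask above is just a pad-token comparison. A minimal sketch, assuming a
# made-up pad_token_id of 1:
def _demo_default_attention_mask():
    ids = np.array([[5, 6, 1]])
    mask = np.where(ids != 1, 1, 0)  # real tokens -> 1, padding -> 0
    assert mask.tolist() == [[1, 1, 0]]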
class FlaxBlenderbotModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,  # the cache paths are exercised explicitly in the check_use_cache_* methods below
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
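    # Added illustration (not part of the original tests): the two checks above
    # exercise the same pattern a greedy decoding loop would use. A minimal
    # sketch, assuming a Flax seq2seq model with encode/init_cache/decode:
    #
    #   encoder_outputs = model.encode(input_ids)
    #   cache = model.init_cache(batch_size, max_length, encoder_outputs)
    #   token = bos_token_ids  # shape (batch_size, 1)
    #   for step in range(max_length):
    #       position_ids = jnp.full((batch_size, 1), step, dtype="i4")
    #       out = model.decode(token, encoder_outputs, past_key_values=cache,
    #                          decoder_position_ids=position_ids)
    #       cache = out.past_key_values
    #       token = out.logits[:, -1].argmax(-1)[:, None]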
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    """simple docstring"""

    vocab_size = 99

    def _get_config_and_data(self):
        """simple docstring"""
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        """simple docstring"""
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        """simple docstring"""
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        """simple docstring"""
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
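    # Added worked example (not part of the original test): with pad_token_id=1
    # and decoder_start_token_id=2, shift_tokens_right moves every token one
    # position to the right, drops the last one, and writes the start token in
    # column 0, e.g.
    #
    #   [71, 82, 18, 33, 2, 1, 1]   ->   [2, 71, 82, 18, 33, 2, 1]
    #
    # which is why exactly one pad token disappears in the assertion above.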
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    """simple docstring"""

    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        """simple docstring"""
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""" )
@slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        """simple docstring"""
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")
        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 331
|
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config(self):
        """simple docstring"""
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        """simple docstring"""
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        """simple docstring"""
        pass

    @slow
    def test_inference_no_head(self):
        """simple docstring"""
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 331
| 1
|
"""simple docstring"""
import re
def indian_phone_validator(phone: str) -> bool:
    """simple docstring"""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
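# Added illustrative checks (worked out by hand against the pattern above):
#   indian_phone_validator("+91-9876543210")  -> True   (optional +91 with - or space)
#   indian_phone_validator("9876543210")      -> True   (10 digits starting with 7, 8 or 9)
#   indian_phone_validator("1234567890")      -> False  (leading digit not in [789])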
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
| 167
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    """simple docstring"""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict, hidden_size):
    """simple docstring"""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
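# Added shape note (illustrative numbers, not from a real checkpoint): the fused
# in_proj_weight stacks q, k and v along dim 0, so with hidden_size = 2 a
# (6, d) tensor splits as
#   val[:2, :]   -> q_proj.weight   (rows 0-1)
#   val[2:4, :]  -> k_proj.weight   (rows 2-3)
#   val[-2:, :]  -> v_proj.weight   (rows 4-5)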
def decoder_config_from_checkpoint(checkpoint):
    """simple docstring"""
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def lowercase_ ( _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="cpu" ):
"""simple docstring"""
A_ : Any = MusicGen.get_pretrained(_UpperCAmelCase , device=_UpperCAmelCase )
A_ : str = decoder_config_from_checkpoint(_UpperCAmelCase )
A_ : Optional[int] = fairseq_model.lm.state_dict()
A_ , A_ : str = rename_state_dict(
_UpperCAmelCase , hidden_size=decoder_config.hidden_size )
A_ : List[str] = TaEncoderModel.from_pretrained('''t5-base''' )
A_ : Tuple = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
A_ : Union[str, Any] = MusicgenForCausalLM(_UpperCAmelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
A_ , A_ : Tuple = decoder.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" )
if len(_UpperCAmelCase ) > 0:
raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
A_ : Tuple = MusicgenForConditionalGeneration(text_encoder=_UpperCAmelCase , audio_encoder=_UpperCAmelCase , decoder=_UpperCAmelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_UpperCAmelCase )
# check we can do a forward pass
A_ : List[str] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
A_ : Union[str, Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
A_ : Tuple = model(input_ids=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
A_ : str = AutoTokenizer.from_pretrained('''t5-base''' )
A_ : int = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
A_ : Optional[int] = MusicgenProcessor(feature_extractor=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
# set the appropriate bos/pad token ids
A_ : Tuple = 2048
A_ : Union[str, Any] = 2048
# set other default generation config params
A_ : Union[str, Any] = int(30 * audio_encoder.config.frame_rate )
A_ : List[str] = True
A_ : List[str] = 3.0
if pytorch_dump_folder is not None:
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(_UpperCAmelCase )
processor.save_pretrained(_UpperCAmelCase )
if repo_id:
logger.info(f"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(_UpperCAmelCase )
processor.push_to_hub(_UpperCAmelCase )
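# Example invocation (added; the script file name and output path are illustrative):
#   python convert_musicgen_checkpoint.py --checkpoint small --pytorch_dump_folder ./musicgen-small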
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 167
| 1
|
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
    parser.add_argument('''--vocab_size''', default=30522, type=int)
    args = parser.parse_args()
logger.info(F'Loading data from {args.data_file}')
with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)

    logger.info('''Counting occurrences for MLM.''')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
logger.info(F'Dump to {args.token_counts_dump}')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
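# Added note (a hedged sketch, not part of this script): downstream, such counts
# are typically turned into smoothed masking probabilities for MLM, e.g.
#
#   import numpy as np
#   freqs = np.maximum(np.array(counts), 1)  # avoid zeros for unseen tokens
#   probs = freqs ** -0.7                    # the smoothing exponent is illustrative
#   probs = probs / probs.sum()              # normalize to a distribution
#
# so that rarer tokens are masked proportionally more often during training.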
| 362
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class _A :
    def __init__(self, row, column, default_value=0) -> None:
        '''simple docstring'''
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]
def __str__( self ) -> str:
'''simple docstring'''
        s = f'Matrix consist of {self.row} rows and {self.column} columns\n'

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f'%{max_element_length}s'

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
def __repr__( self ) -> str:
'''simple docstring'''
return str(self )
    def validate_indicies(self, loc) -> bool:
        '''simple docstring'''
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc) -> Any:
        '''simple docstring'''
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value) -> None:
        '''simple docstring'''
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another) -> Matrix:
        '''simple docstring'''
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        '''simple docstring'''
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another) -> Matrix:
        '''simple docstring'''
        return self + (-another)
    def __mul__(self, another) -> Matrix:
        '''simple docstring'''
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'Unsupported type given for another ({type(another)})'
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        '''simple docstring'''
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v) -> Any:
        '''simple docstring'''
        # Size validation
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
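    # Added mathematical note (background, not original code): sherman_morrison
    # implements the Sherman-Morrison identity
    #
    #   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
    #
    # where `self` holds A^(-1); it is valid whenever 1 + v^T A^(-1) u != 0,
    # which is exactly the `numerator_factor` check above.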
# Testing
if __name__ == "__main__":
    def test1() -> None:
        """simple docstring"""
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f'a^(-1) is {ainv}')
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f'u is {u}')
        print(f'v is {v}')
        print(f'uv^T is {u * v.transpose()}')
        # Sherman Morrison
        print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}')

    def test2() -> None:
        """simple docstring"""
        import doctest

        doctest.testmod()

    test2()
| 16
| 0
|
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        """simple docstring"""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
"""simple docstring"""
return "lower newer", "lower newer"
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_padding(self, max_length=15):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_padding_different_model_input_name(self):
"""simple docstring"""
pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    '''simple docstring'''

    pass
| 283
|
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
    """simple docstring"""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """simple docstring"""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []
def __enter__( self : List[Any] ) -> Tuple:
        *submodules, target_attr = self.target.split(".")
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f'Tried to patch attribute {target_attr} instead of a submodule.')
def __exit__( self : Any , *lowerCamelCase : Any ) -> Optional[int]:
for attr in list(self.original ):
setattr(self.obj , lowerCamelCase , self.original.pop(lowerCamelCase ) )
def __snake_case ( self : Optional[Any] ) -> Optional[int]:
self.__enter__()
self._active_patches.append(self )
def __snake_case ( self : Any ) -> List[str]:
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
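# Added usage sketch (hedged; `some_module` and `fake_join` are illustrative
# names, not part of this file). patch_submodule temporarily swaps an object
# reachable from a module's globals, even through renamed imports, and restores
# it on exit:
#
#   import some_module  # hypothetical module that does `import os`
#
#   def fake_join(*parts):
#       return "/patched/" + "/".join(parts)
#
#   with patch_submodule(some_module, "os.path.join", fake_join):
#       some_module.os.path.join("a", "b")  # -> "/patched/a/b"
#   # afterwards os.path.join is the real implementation again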
| 123
| 0
|
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__)
class __magic_name__ ( snake_case ):
def __init__( self , *_lowercase , **_lowercase )-> None:
warnings.warn(
"The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use OwlViTImageProcessor instead." , _lowercase , )
super().__init__(*_lowercase , **_lowercase )
| 60
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __magic_name__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self )-> Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
UpperCamelCase_ = FlaxDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=_lowercase , cache_dir=_lowercase )
UpperCamelCase_ = [t[-1] for t in os.walk(os.path.join(_lowercase , os.listdir(_lowercase )[0] , "snapshots" ) )]
UpperCamelCase_ = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class __magic_name__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ , UpperCamelCase_ = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=_lowercase )
UpperCamelCase_ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCamelCase_ = jax.random.PRNGKey(0 )
UpperCamelCase_ = 4
UpperCamelCase_ = jax.device_count()
UpperCamelCase_ = num_samples * [prompt]
UpperCamelCase_ = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
UpperCamelCase_ = replicate(_lowercase )
UpperCamelCase_ = jax.random.split(_lowercase , _lowercase )
UpperCamelCase_ = shard(_lowercase )
UpperCamelCase_ = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1_514_745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49_947.875) < 5e-1
UpperCamelCase_ = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_lowercase ) == num_samples
def UpperCAmelCase_ ( self )-> Union[str, Any]:
UpperCamelCase_ , UpperCamelCase_ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=_lowercase )
UpperCamelCase_ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCamelCase_ = jax.random.PRNGKey(0 )
UpperCamelCase_ = 50
UpperCamelCase_ = jax.device_count()
UpperCamelCase_ = num_samples * [prompt]
UpperCamelCase_ = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
UpperCamelCase_ = replicate(_lowercase )
UpperCamelCase_ = jax.random.split(_lowercase , _lowercase )
UpperCamelCase_ = shard(_lowercase )
UpperCamelCase_ = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05_652_401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2_383_808.2)) < 5e-1
def UpperCAmelCase_ ( self )-> List[str]:
UpperCamelCase_ , UpperCamelCase_ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=_lowercase )
UpperCamelCase_ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCamelCase_ = jax.random.PRNGKey(0 )
UpperCamelCase_ = 50
UpperCamelCase_ = jax.device_count()
UpperCamelCase_ = num_samples * [prompt]
UpperCamelCase_ = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
UpperCamelCase_ = replicate(_lowercase )
UpperCamelCase_ = jax.random.split(_lowercase , _lowercase )
UpperCamelCase_ = shard(_lowercase )
UpperCamelCase_ = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04_003_906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2_373_516.75)) < 5e-1
def UpperCAmelCase_ ( self )-> List[Any]:
UpperCamelCase_ , UpperCamelCase_ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa )
UpperCamelCase_ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCamelCase_ = jax.random.PRNGKey(0 )
UpperCamelCase_ = 50
UpperCamelCase_ = jax.device_count()
UpperCamelCase_ = num_samples * [prompt]
UpperCamelCase_ = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
UpperCamelCase_ = replicate(_lowercase )
UpperCamelCase_ = jax.random.split(_lowercase , _lowercase )
UpperCamelCase_ = shard(_lowercase )
UpperCamelCase_ = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04_003_906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2_373_516.75)) < 5e-1
def UpperCAmelCase_ ( self )-> Any:
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00_085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=False, steps_offset=1,
        )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, scheduler=scheduler, safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state
UpperCamelCase_ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCamelCase_ = jax.random.PRNGKey(0 )
UpperCamelCase_ = 50
UpperCamelCase_ = jax.device_count()
UpperCamelCase_ = num_samples * [prompt]
UpperCamelCase_ = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
UpperCamelCase_ = replicate(_lowercase )
UpperCamelCase_ = jax.random.split(_lowercase , _lowercase )
UpperCamelCase_ = shard(_lowercase )
UpperCamelCase_ = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None,
        )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None,
            use_memory_efficient_attention=True,
        )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1000) -> int:
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number
    return the_digit
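# Illustrative check (added, not in the original file): this is Project Euler #26; for
# the default digit=1000 the longest recurring cycle in 1/d comes from d = 983, so
# solution() is expected to return 983.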
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_electra"""] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_tf_electra"""] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_flax_electra"""] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
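# Note (added for context): outside of TYPE_CHECKING the module swaps itself in
# sys.modules for a _LazyModule, so torch/tf/flax are only imported the first time
# one of the names registered in _import_structure is actually accessed.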
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
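# Illustrative property (added, not in the original file): comment-only lines do not
# affect the digest, e.g.
#   _hash_python_lines(["# header", "x = 1"]) == _hash_python_lines(["x = 1"])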
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
__SCREAMING_SNAKE_CASE = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
"""simple docstring"""
if attention_mask is None:
__magic_name__ : str = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
__magic_name__ : Dict = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
__magic_name__ : List[Any] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__magic_name__ : Tuple = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__magic_name__ : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxBlenderbotModelTester:
'''simple docstring'''
def __init__( self : str , _A : List[Any] , _A : Any=13 , _A : Dict=7 , _A : Tuple=True , _A : Dict=False , _A : int=99 , _A : str=16 , _A : Union[str, Any]=2 , _A : Tuple=4 , _A : int=4 , _A : Optional[int]="gelu" , _A : Tuple=0.1 , _A : Union[str, Any]=0.1 , _A : Tuple=32 , _A : List[Any]=2 , _A : Optional[Any]=1 , _A : Any=0 , _A : Dict=0.02 , ) -> Optional[Any]:
__magic_name__ : int = parent
__magic_name__ : List[Any] = batch_size
__magic_name__ : List[Any] = seq_length
__magic_name__ : int = is_training
__magic_name__ : Union[str, Any] = use_labels
__magic_name__ : Dict = vocab_size
__magic_name__ : Dict = hidden_size
__magic_name__ : Any = num_hidden_layers
__magic_name__ : Any = num_attention_heads
__magic_name__ : Dict = intermediate_size
__magic_name__ : Optional[Any] = hidden_act
__magic_name__ : str = hidden_dropout_prob
__magic_name__ : Optional[Any] = attention_probs_dropout_prob
__magic_name__ : int = max_position_embeddings
__magic_name__ : str = eos_token_id
__magic_name__ : Any = pad_token_id
__magic_name__ : Any = bos_token_id
__magic_name__ : Any = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
def __lowerCAmelCase ( self : List[str] , _A : Any , _A : Dict , _A : Tuple ) -> Dict:
__magic_name__ : str = 20
__magic_name__ : List[Any] = model_class_name(_A )
__magic_name__ : Union[str, Any] = model.encode(inputs_dict['input_ids'] )
__magic_name__ , __magic_name__ : Any = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
__magic_name__ : Tuple = model.init_cache(decoder_input_ids.shape[0] , _A , _A )
__magic_name__ : Any = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
__magic_name__ : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__magic_name__ : List[Any] = model.decode(
decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
__magic_name__ : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
__magic_name__ : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , _A , decoder_attention_mask=_A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_A , )
__magic_name__ : str = model.decode(_A , _A )
__magic_name__ : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
def __lowerCAmelCase ( self : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : Optional[Any] ) -> List[str]:
__magic_name__ : Union[str, Any] = 20
__magic_name__ : Optional[Any] = model_class_name(_A )
__magic_name__ : Optional[Any] = model.encode(inputs_dict['input_ids'] )
__magic_name__ , __magic_name__ : str = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
__magic_name__ : int = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__magic_name__ : List[Any] = model.init_cache(decoder_input_ids.shape[0] , _A , _A )
__magic_name__ : List[str] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__magic_name__ : Any = model.decode(
decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
__magic_name__ : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
__magic_name__ : Optional[Any] = model.decode(
decoder_input_ids[:, -1:] , _A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_A , decoder_position_ids=_A , )
__magic_name__ : Dict = model.decode(_A , _A , decoder_attention_mask=_A )
__magic_name__ : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
'''simple docstring'''
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ], dtype=np.int64)
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_forward_with_decoder_input_ids(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        input_ids = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=input_ids, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
__magic_name__ , __magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_A , _A , _A )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
__magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_A , _A , _A )
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
__magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__magic_name__ : Union[str, Any] = self._prepare_for_class(_A , _A )
__magic_name__ : Any = model_class(_A )
@jax.jit
def encode_jitted(_A : Tuple , _A : Tuple=None , **_A : List[Any] ):
return model.encode(input_ids=_A , attention_mask=_A )
with self.subTest('JIT Enabled' ):
__magic_name__ : List[Any] = encode_jitted(**_A ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__magic_name__ : List[str] = encode_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
__magic_name__ , __magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__magic_name__ : List[str] = model_class(_A )
__magic_name__ : List[Any] = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
__magic_name__ : Tuple = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_A : List[str] , _A : Any , _A : int ):
return model.decode(
decoder_input_ids=_A , decoder_attention_mask=_A , encoder_outputs=_A , )
with self.subTest('JIT Enabled' ):
__magic_name__ : Tuple = decode_jitted(**_A ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__magic_name__ : Any = decode_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __lowerCAmelCase ( self : Any ) -> int:
for model_class_name in self.all_model_classes:
__magic_name__ : List[Any] = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
__magic_name__ : List[str] = np.ones((1, 1) ) * model.config.eos_token_id
__magic_name__ : List[str] = model(_A )
self.assertIsNotNone(_A )
@unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
@slow
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
__magic_name__ : Union[str, Any] = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
__magic_name__ : List[str] = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
__magic_name__ : int = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=_A )
__magic_name__ : List[Any] = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
__magic_name__ : str = ['Sam']
__magic_name__ : List[Any] = tokenizer(_A , return_tensors='jax' )
__magic_name__ : Optional[Any] = model.generate(**_A , **_A )
__magic_name__ : Tuple = 'Sam is a great name. It means "sun" in Gaelic.'
__magic_name__ : List[str] = tokenizer.batch_decode(_A , **_A )
assert generated_txt[0].strip() == tgt_text
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
"""simple docstring"""
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
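# Context (added note): this is Project Euler #94 (almost equilateral triangles). The
# prev_value/value recurrence walks successive solutions of the Pell equation
# x^2 - 3y^2 = 1; each one yields a triangle with sides (a, a, a +/- 1) and integral
# area, whose perimeter 2*value +/- 2 is accumulated until it exceeds max_perimeter.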
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(_snake_case):
with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCAmelCase : int = f.readlines()
lowerCAmelCase : Tuple = 0
while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_snake_case ):
return None
# First grab the objects without a specific backend in _import_structure
lowerCAmelCase : List[str] = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
lowerCAmelCase : List[str] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_snake_case ):
lowerCAmelCase : str = _re_one_line_import_struct.search(_snake_case ).groups()[0]
lowerCAmelCase : Dict = re.findall('''\[([^\]]+)\]''' , _snake_case )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
lowerCAmelCase : Tuple = _re_import_struct_key_value.search(_snake_case )
if single_line_import_search is not None:
lowerCAmelCase : str = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
lowerCAmelCase : str = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowerCAmelCase : Tuple = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase : List[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase : Union[str, Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
lowerCAmelCase : int = lines[line_index]
if _re_import_struct_add_one.search(_snake_case ) is not None:
objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] )
elif _re_import_struct_add_many.search(_snake_case ) is not None:
lowerCAmelCase : str = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' )
lowerCAmelCase : Dict = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif _re_between_brackets.search(_snake_case ) is not None:
lowerCAmelCase : Any = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' )
lowerCAmelCase : List[str] = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif _re_quote_object.search(_snake_case ) is not None:
objects.append(_re_quote_object.search(_snake_case ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
lowerCAmelCase : List[Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowerCAmelCase : Optional[Any] = []
while (
line_index < len(_snake_case )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
lowerCAmelCase : Optional[Any] = lines[line_index]
lowerCAmelCase : List[Any] = _re_import.search(_snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowerCAmelCase : List[str] = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(_snake_case ):
# If the line is an if is_backend_available, we grab all objects associated.
lowerCAmelCase : str = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase : int = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase : str = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
lowerCAmelCase : Any = lines[line_index]
lowerCAmelCase : Tuple = _re_import.search(_snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowerCAmelCase : Optional[Any] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
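        # subword-nmt style toy vocab: "</w>" marks a word-final subword unit.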
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    token = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') )
return token
def final():
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
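# Note (added for context): each helper above emits (huggingface_name, original_name)
# string pairs; the converter below walks config.depth to assemble the full rename
# table, then copies the original tensors into an OrderedDict under the new names.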
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Input Image Size',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCAmelCase_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
from __future__ import annotations
from random import random
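# A treap: a binary search tree ordered by `value` that is simultaneously heap-ordered
# by a random `prior`, which keeps the tree balanced in expectation.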
class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root: Node | None) -> None:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=',' )
inorder(root.right )
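# Illustrative session (added, not in the original file):
#   root = None
#   for v in (1, 3, 5):
#       root = insert(root, v)
#   root = erase(root, 3)
#   inorder(root)  # prints: 1,5,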
def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 357
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
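# Usage sketch (hedged: "tokenizers" is just an illustrative package name):
#
#     dep_version_check("tokenizers")  # raises if the installed version violates the pin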
| 307
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
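# Usage sketch (hedged: the image path is a placeholder and the call style is
# an assumption about the PipelineTool interface; the checkpoint is downloaded
# on first use):
#
#     from PIL import Image
#     tool = ImageQuestionAnsweringTool()
#     answer = tool(image=Image.open("cat.png"), question="What color is the cat?")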
| 60
|
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A and B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # multiply the DFTs of A and B and find A*B
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); Shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )
        return f"{a}\n{b}\n{c}"
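# Usage sketch (hedged: illustrative only). Multiplying A(x) = 1 + 2x + 3x^2
# by B(x) = 3 + 4x should give A*B = 3 + 10x + 17x^2 + 12x^3; the class stores
# the product coefficients as rounded complex numbers:
#
#     fft = FFT([1, 2, 3], [3, 4])
#     print(fft.product)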
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 60
| 1
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint):
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
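# Example invocation (hedged: the script file name is an assumption and the
# dump folder is a placeholder):
#
#     python convert_musicgen_transformers.py \
#         --checkpoint small \
#         --pytorch_dump_folder ./musicgen-small \
#         --device cpu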
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 355
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        expected_mean = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # fmt: off
        expected_slice = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = (
            "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the "
            "universe and 2) the passage of time and the length of objects can vary depending on the observer's "
            "frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, "
            'is known as the "princi'
        )
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| 110
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for the start-of-sentence (sos) token
        n_positions=32 * 32,  # 32x32 pixel positions
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_images = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_images, return_tensors=framework))
        return inputs
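# Usage sketch (hedged: illustrative only). The defaults above mirror the
# "small" ImageGPT variant:
#
#     config = ImageGPTConfig()
#     (config.n_layer, config.n_embd, config.vocab_size)  # -> (24, 512, 513)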
| 256
|
"""simple docstring"""
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
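# Example (hedged: the constants are illustrative round numbers for water,
# density ~998 kg/m^3 and bulk modulus ~2.15e9 Pa):
#
#     round(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))  # -> 1468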
if __name__ == "__main__":
import doctest
doctest.testmod()
| 256
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 215
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'Dataset tokenized in {time.time()-t_start:.2f}s')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'Data pushed to the hub in {time.time()-t_start:.2f}s')
| 215
| 1
|
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
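# Example invocation (hedged: the script file name is an assumption and both
# paths are placeholders):
#
#     python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#         --xlm_checkpoint_path ./mlm_en_2048.pth \
#         --pytorch_dump_folder_path ./xlm-converted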
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_UpperCamelCase : int = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 220
|
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 314
| 0
|
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )

        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 351
|
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add the extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids

        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]):
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str):
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token: str):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        return ()
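# Usage sketch (hedged: illustrative only). ByT5 tokenizes raw UTF-8 bytes and
# shifts each byte id past the 3 special tokens (pad=0, eos=1, unk=2):
#
#     tok = ByT5Tokenizer()
#     tok("hi")["input_ids"]                 # -> [107, 108, 1]  (104+3, 105+3, eos)
#     tok.convert_ids_to_tokens([107, 108])  # -> ['h', 'i']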
| 347
| 0
|
"""simple docstring"""
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
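# Worked example (hedged: illustrative only). With limit=8, the reduced proper
# fraction immediately to the left of 3/7 is 2/5, so:
#
#     solution(3, 7, 8)  # -> 2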
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_00_00_00))
| 66
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
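# Note (hedged): with the lazy-module pattern above, `from transformers import
# JukeboxConfig` only imports the actual submodule on first attribute access,
# which keeps a plain `import transformers` fast.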
| 307
| 0
|
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 15
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
@slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)
@slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)
@slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)
@slow
def __lowercase ( self ) -> Dict:
_a : Optional[Any] = self.get_auto_remove_tmp_dir()
_a : int = F"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
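

# These tests shadow the example scripts under examples/flax, so they are
# normally launched from that directory; a typical invocation (file name
# illustrative) is:
#   python -m pytest -n auto ./test_flax_examples.py -v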
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
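
# What the lazy structure above buys (sketch; imports illustrative): importing
# the package stays cheap, and the heavy modeling submodules are only loaded on
# first attribute access, e.g.
#   from transformers.models.groupvit import GroupViTConfig  # config only, no torch yet
#   from transformers.models.groupvit import GroupViTModel   # triggers the torch-backed module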
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
    required_optional_params = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
    test_xformers_attention_forwardGenerator_pass = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_0, self.block_out_channels_0 * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler_kwargs = {
'''num_train_timesteps''': 1_000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
        scheduler = DDIMScheduler(**scheduler_kwargs)

        components = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
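
# The two-stage flow exercised by the integration test above, in plain form
# (model ids are the public checkpoints used in the test; everything else is
# illustrative):
#   prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
#   image_emb, negative_emb = prior("A red cartoon frog, 4k", negative_prompt="").to_tuple()
#   decoder = KandinskyV22Img2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
#   frog = decoder(image=init_image, image_embeds=image_emb, negative_image_embeds=negative_emb, strength=0.2).images[0]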
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=50265, max_position_embeddings=512,
        encoder_layers=8, encoder_ffn_dim=2048, encoder_attention_heads=16,
        decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0,
        use_cache=True, is_encoder_decoder=True,
        activation_function="gelu", d_model=512,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=1, scale_embedding=False,
        pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
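
    # Quick illustration (sketch) of the attribute_map declared above: with the
    # defaults, config.hidden_size resolves to config.d_model (512) and
    # config.num_attention_heads to config.encoder_attention_heads (16).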
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
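
# Sketch of how an OnnxConfig like this is consumed through the transformers
# ONNX exporter CLI (output directory illustrative):
#   python -m transformers.onnx --model=facebook/blenderbot_small-90M --feature=seq2seq-lm onnx/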
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Project Euler problem 39: find the perimeter p <= 1000 with the most right-triangle solutions."""
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """
    Returns a Counter keyed by perimeter, counting the Pythagorean triples
    whose perimeter does not exceed max_perimeter.
    """
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
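

# Well-known sanity check (added for illustration): a perimeter of 120 is
# shared by the right triangles (20, 48, 52), (24, 45, 51) and (30, 40, 50).
if __name__ == "__main__":
    assert pythagorean_triple(120)[120] == 3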

def solution(n: int = 1000) -> int:
    """Returns the perimeter <= n with the maximum number of solutions."""
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
"""Boruvka's algorithm for minimum spanning trees, on an adjacency-list graph."""


class Graph:
    """Data structure to store graphs (based on adjacency lists)."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Boruvka's algorithm assumes distinct weights; make the weights distinct."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))

        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1

        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges in the graph as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from the given vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind(object):
        """Disjoint-set structure with path compression and union by rank."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1

    @staticmethod
    def boruvka_mst(graph):
        """Returns a minimum spanning tree of the graph using Boruvka's algorithm."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
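

# Illustrative usage of the API above (weights are made distinct first, as
# Boruvka's algorithm requires):
if __name__ == "__main__":
    g = Graph.build(edges=[(0, 1, 1), (0, 2, 2), (2, 3, 1), (1, 3, 5)])
    g.distinct_weight()
    print(Graph.boruvka_mst(g))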
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_transfo_xl''': ['''TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TransfoXLConfig'''],
'''tokenization_transfo_xl''': ['''TransfoXLCorpus''', '''TransfoXLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'''TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AdaptiveEmbedding''',
'''TransfoXLForSequenceClassification''',
'''TransfoXLLMHeadModel''',
'''TransfoXLModel''',
'''TransfoXLPreTrainedModel''',
'''load_tf_weights_in_transfo_xl''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'''TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAdaptiveEmbedding''',
'''TFTransfoXLForSequenceClassification''',
'''TFTransfoXLLMHeadModel''',
'''TFTransfoXLMainLayer''',
'''TFTransfoXLModel''',
'''TFTransfoXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract start/end times and duration (in minutes) from a single workflow job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
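
# Example invocation (script name and run id illustrative); prints one
# "<job name>: <minutes>" line per job, longest first:
#   python get_github_job_time.py --workflow_run_id 2945609517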
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Pass two points to get the vector from them in the form (x, y, z)."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Get the cross product of the two vectors AB and AC."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """Check if a vector is equal to (0, 0, 0) up to the given rounding accuracy."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Three points are collinear exactly when AB x AC is the zero vector."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
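

# Quick check (illustrative): three points on the x-axis are collinear, and
# nudging the last one off the line breaks it.
if __name__ == "__main__":
    assert are_collinear((0, 0, 0), (1, 0, 0), (2, 0, 0))
    assert not are_collinear((0, 0, 0), (1, 0, 0), (2, 1, 0))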
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument("--pretrained_model_config", type=str, default="roberta-base",
                        help="The model config to use. Note that we don't copy the model's weights, only the config!")
    parser.add_argument("--tokenizer", type=str, default="unigram-tokenizer-wikitext",
                        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.")
    parser.add_argument("--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.")
    parser.add_argument("--no_tpu", action="store_true",
                        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.")
    parser.add_argument("--tpu_name", type=str, default="local",
                        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.")
    parser.add_argument("--tpu_zone", type=str,
                        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.")
    parser.add_argument("--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument("--bfloat16", action="store_true",
                        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.")
    parser.add_argument("--train_dataset", type=str,
                        help="Path to training dataset to load. If the path begins with `gs://` then the dataset will be loaded from a Google Cloud Storage bucket.")
    parser.add_argument("--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)")
    parser.add_argument("--eval_dataset", type=str,
                        help="Path to evaluation dataset to load. If the path begins with `gs://` then the dataset will be loaded from a Google Cloud Storage bucket.")
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of epochs to train for.")
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.")
    parser.add_argument("--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.")
    parser.add_argument("--max_length", type=int, default=512,
                        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py")
    parser.add_argument("--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.")
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        # Shard files carry their sample count in the name: ...-<shard>-<num_samples>.tfrecord
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        num_samples += int(sample_count)
    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
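
# Illustrative launch command (dataset paths and ids are placeholders):
#   python run_mlm.py --tokenizer unigram-tokenizer-wikitext \
#       --pretrained_model_config roberta-base \
#       --train_dataset gs://my-bucket/train/ --eval_dataset gs://my-bucket/eval/ \
#       --output_dir trained_model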
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")

        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{"""sequence""": """My name is grouped""", """score""": 2.1e-05, """token""": 38015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1e-05, """token""": 25506, """token_str""": """ accuser"""},
] ,)
UpperCAmelCase_ : Dict = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__a ,decimals=6 ) ,[
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1e-05,
"""token""": 38015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1e-05,
"""token""": 25506,
"""token_str""": """ accuser""",
},
] ,)
UpperCAmelCase_ : List[str] = unmasker("""My name is <mask>""" ,targets=[""" Patrick""", """ Clara""", """ Teven"""] ,top_k=3 )
self.assertEqual(
nested_simplify(__a ,decimals=6 ) ,[
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 13606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9e-05, """token""": 2941, """token_str""": """ Te"""},
] ,)
    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")

        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{"""sequence""": """My name is Maul""", """score""": 2.2e-05, """token""": 35676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2e-05, """token""": 16416, """token_str""": """ELS"""},
] ,)
UpperCAmelCase_ : List[str] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__a ,decimals=6 ) ,[
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2e-05,
"""token""": 35676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2e-05, """token""": 16416, """token_str""": """ELS"""},
] ,)
UpperCAmelCase_ : Tuple = unmasker("""My name is <mask>""" ,targets=[""" Patrick""", """ Clara""", """ Teven"""] ,top_k=3 )
self.assertEqual(
nested_simplify(__a ,decimals=6 ) ,[
{"""sequence""": """My name is Patrick""", """score""": 2.1e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2e-05, """token""": 2941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 13606, """token_str""": """ Clara"""},
] ,)
UpperCAmelCase_ : List[str] = unmasker("""My name is <mask> <mask>""" ,top_k=2 )
self.assertEqual(
nested_simplify(__a ,decimals=6 ) ,[
[
{
"""score""": 2.2e-05,
"""token""": 35676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2e-05, """token""": 16416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2e-05,
"""token""": 35676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2e-05, """token""": 16416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] ,)
    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)

    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)

    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
{"""sequence""": """My name is John""", """score""": 0.0_0_8, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.0_0_7, """token""": 1573, """token_str""": """ Chris"""},
] ,)
UpperCAmelCase_ : Optional[Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__a ) ,[
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.2_5_1,
"""token""": 2201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.2_1_4,
"""token""": 12790,
"""token_str""": """ Lyon""",
},
] ,)
UpperCAmelCase_ : str = unmasker("""My name is <mask>""" ,targets=[""" Patrick""", """ Clara""", """ Teven"""] ,top_k=3 )
self.assertEqual(
nested_simplify(__a ) ,[
{"""sequence""": """My name is Patrick""", """score""": 0.0_0_5, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.0_0_0, """token""": 13606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.0_0_0, """token""": 2941, """token_str""": """ Te"""},
] ,)
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
def A__ ( self: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: List[Any] ) -> str:
UpperCAmelCase_ : Tuple = fill_masker.tokenizer
UpperCAmelCase_ : Any = fill_masker.model
UpperCAmelCase_ : Union[str, Any] = fill_masker(
F'''This is a {tokenizer.mask_token}''' ,)
self.assertEqual(
__a ,[
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
] ,)
UpperCAmelCase_ : int = fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
__a ,[
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
] ,)
UpperCAmelCase_ : str = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
__a ,[
[
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
],
[
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
],
] ,)
with self.assertRaises(__a ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(__a ):
fill_masker("""This is""" )
self.run_test_top_k(__a ,__a )
self.run_test_targets(__a ,__a )
self.run_test_top_k_targets(__a ,__a )
self.fill_mask_with_duplicate_targets_and_top_k(__a ,__a )
self.fill_mask_with_multiple_masks(__a ,__a )
def A__ ( self: Tuple ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Dict ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = tokenizer.get_vocab()
UpperCAmelCase_ : Optional[int] = sorted(vocab.keys() )[:2]
# Pipeline argument
UpperCAmelCase_ : Dict = FillMaskPipeline(model=__a ,tokenizer=__a ,targets=__a )
UpperCAmelCase_ : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
__a ,[
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
] ,)
UpperCAmelCase_ : Tuple = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} ,__a )
UpperCAmelCase_ : Tuple = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} ,set(__a ) )
# Call argument
UpperCAmelCase_ : Dict = FillMaskPipeline(model=__a ,tokenizer=__a )
UpperCAmelCase_ : int = fill_masker(F'''This is a {tokenizer.mask_token}''' ,targets=__a )
self.assertEqual(
__a ,[
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
] ,)
UpperCAmelCase_ : Tuple = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} ,__a )
UpperCAmelCase_ : Union[str, Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} ,set(__a ) )
# Score equivalence
UpperCAmelCase_ : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' ,targets=__a )
UpperCAmelCase_ : List[Any] = [top_mask["""token_str"""] for top_mask in outputs]
UpperCAmelCase_ : Union[str, Any] = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__a ) == set(__a ):
UpperCAmelCase_ : List[str] = fill_masker(F'''This is a {tokenizer.mask_token}''' ,targets=__a )
UpperCAmelCase_ : Dict = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(__a ) ,nested_simplify(__a ) )
# Raises with invalid
with self.assertRaises(__a ):
UpperCAmelCase_ : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' ,targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(__a ):
UpperCAmelCase_ : str = fill_masker(F'''This is a {tokenizer.mask_token}''' ,targets=[""""""] )
with self.assertRaises(__a ):
UpperCAmelCase_ : Dict = fill_masker(F'''This is a {tokenizer.mask_token}''' ,targets="""""" )
def A__ ( self: int ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Optional[int] ) -> List[Any]:
UpperCAmelCase_ : Any = FillMaskPipeline(model=__a ,tokenizer=__a ,top_k=2 )
UpperCAmelCase_ : List[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
__a ,[
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
] ,)
UpperCAmelCase_ : Union[str, Any] = FillMaskPipeline(model=__a ,tokenizer=__a )
UpperCAmelCase_ : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' ,top_k=2 )
self.assertEqual(
__a ,[
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
] ,)
self.assertEqual(nested_simplify(__a ) ,nested_simplify(__a ) )
    def run_test_top_k_targets( self ,model ,tokenizer ):
UpperCAmelCase_ : List[str] = tokenizer.get_vocab()
UpperCAmelCase_ : Dict = FillMaskPipeline(model=__a ,tokenizer=__a )
# top_k=2, ntargets=3
UpperCAmelCase_ : Any = sorted(vocab.keys() )[:3]
UpperCAmelCase_ : List[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' ,top_k=2 ,targets=__a )
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        UpperCAmelCase_ : Tuple = [el["""token_str"""] for el in sorted(__a ,key=lambda x: x["score"] ,reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__a ).issubset(__a ):
UpperCAmelCase_ : Union[str, Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' ,top_k=3 ,targets=__a )
# They should yield exactly the same result
self.assertEqual(nested_simplify(__a ) ,nested_simplify(__a ) )
    def fill_mask_with_duplicate_targets_and_top_k( self ,model ,tokenizer ):
UpperCAmelCase_ : Tuple = FillMaskPipeline(model=__a ,tokenizer=__a )
UpperCAmelCase_ : Union[str, Any] = tokenizer.get_vocab()
# String duplicates + id duplicates
UpperCAmelCase_ : int = sorted(vocab.keys() )[:3]
UpperCAmelCase_ : Union[str, Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
UpperCAmelCase_ : Optional[int] = fill_masker(F'''My name is {tokenizer.mask_token}''' ,targets=__a ,top_k=10 )
        # The target list contains duplicates, so we can't output more
        # results than the number of unique targets
self.assertEqual(len(__a ) ,3 )
    def fill_mask_with_multiple_masks( self ,model ,tokenizer ):
UpperCAmelCase_ : Optional[int] = FillMaskPipeline(model=__a ,tokenizer=__a )
UpperCAmelCase_ : Optional[Any] = fill_masker(
F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' ,top_k=2 )
self.assertEqual(
__a ,[
[
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
],
[
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
],
[
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
],
] ,)
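# Added usage sketch (not part of the original tests): the public pipeline API
# these tests exercise. "distilroberta-base" is just one checkpoint that has a
# mask token; any fill-mask model works here.
if __name__ == "__main__":
    from transformers import pipeline

    demo_fill_masker = pipeline("fill-mask", model="distilroberta-base")
    for demo_entry in demo_fill_masker("This is a <mask>.", top_k=2):
        # each dict carries the keys asserted above: sequence, score, token, token_str
        print(demo_entry["token_str"], round(demo_entry["score"], 3))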
| 361
|
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
'''simple docstring'''
    def _info( self ) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) ,reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] ,)
    def _compute( self ,predictions ,references ,sample_weight=None ) -> dict:
        return {
            "matthews_correlation": float(matthews_corrcoef(references ,predictions ,sample_weight=sample_weight ) ),
        }
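# Added illustration (not part of the original metric file): in the binary
# case, MCC can be computed directly from confusion-matrix counts; sklearn's
# matthews_corrcoef generalizes this to the multiclass setting.
def _binary_mcc(tp: int, tn: int, fp: int, fn: int) -> float:
    from math import sqrt

    denominator = sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return (tp * tn - fp * fn) / denominator if denominator else 0.0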
| 59
| 0
|
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPMaDiscreteSchedulerTest( SchedulerCommonTest ):
'''simple docstring'''
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10
def UpperCamelCase_ ( self : Optional[int] ,**A : int ):
        config = {
"num_train_timesteps": 11_00,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**A )
return config
def UpperCamelCase_ ( self : Dict ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=A )
def UpperCamelCase_ ( self : Dict ):
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] ,[0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=A ,beta_end=A )
def UpperCamelCase_ ( self : Tuple ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=A )
def UpperCamelCase_ ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A )
def UpperCamelCase_ ( self : str ):
__A = self.scheduler_classes[0]
__A = self.get_scheduler_config(prediction_type="v_prediction" )
__A = scheduler_class(**A )
scheduler.set_timesteps(self.num_inference_steps )
__A = self.dummy_model()
__A = self.dummy_sample_deter * scheduler.init_noise_sigma
__A = sample.to(A )
for i, t in enumerate(scheduler.timesteps ):
__A = scheduler.scale_model_input(A ,A )
__A = model(A ,A )
__A = scheduler.step(A ,A ,A )
__A = output.prev_sample
__A = torch.sum(torch.abs(A ) )
__A = torch.mean(torch.abs(A ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693428650170972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.00_02 ) < 1E-3
def UpperCamelCase_ ( self : Optional[Any] ):
if torch_device == "mps":
return
__A = self.scheduler_classes[0]
__A = self.get_scheduler_config()
__A = scheduler_class(**A )
scheduler.set_timesteps(self.num_inference_steps )
__A = self.dummy_model()
__A = self.dummy_sample_deter * scheduler.init_noise_sigma
__A = sample.to(A )
for i, t in enumerate(scheduler.timesteps ):
__A = scheduler.scale_model_input(A ,A )
__A = model(A ,A )
__A = scheduler.step(A ,A ,A )
__A = output.prev_sample
__A = torch.sum(torch.abs(A ) )
__A = torch.mean(torch.abs(A ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
def UpperCamelCase_ ( self : Dict ):
if torch_device == "mps":
return
__A = self.scheduler_classes[0]
__A = self.get_scheduler_config()
__A = scheduler_class(**A )
scheduler.set_timesteps(self.num_inference_steps ,device=A )
__A = self.dummy_model()
__A = self.dummy_sample_deter.to(A ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
__A = scheduler.scale_model_input(A ,A )
__A = model(A ,A )
__A = scheduler.step(A ,A ,A )
__A = output.prev_sample
__A = torch.sum(torch.abs(A ) )
__A = torch.mean(torch.abs(A ) )
if str(A ).startswith("cpu" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
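# Added illustration (not part of the original tests): the denoising loop the
# tests above exercise, using a zero tensor as a stand-in for a real UNet call.
# The constructor kwargs mirror the scheduler config used in the tests.
if __name__ == "__main__":
    demo_scheduler = KDPMaDiscreteScheduler(
        num_train_timesteps=11_00, beta_start=0.00_01, beta_end=0.02, beta_schedule="linear"
    )
    demo_scheduler.set_timesteps(10)
    demo_sample = torch.randn(1, 3, 8, 8) * demo_scheduler.init_noise_sigma
    for demo_t in demo_scheduler.timesteps:
        demo_input = demo_scheduler.scale_model_input(demo_sample, demo_t)
        demo_pred = torch.zeros_like(demo_input)  # stand-in for model(demo_input, demo_t)
        demo_sample = demo_scheduler.step(demo_pred, demo_t, demo_sample).prev_sample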
| 15
|
def gnome_sort(lst: list) -> list:
    """Gnome sort: walk the list, swapping adjacent out-of-order elements
    and stepping back one position after each swap."""
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
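# Added sanity checks (illustration only, not part of the original script)
if __name__ == "__main__":
    assert gnome_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
    assert gnome_sort([]) == []
    assert gnome_sort([-2, -5, -45]) == [-45, -5, -2]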
| 15
| 1
|
'''simple docstring'''
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of ``n`` (all divisors except ``n`` itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10_000) -> int:
    """Sum all amicable numbers strictly below ``limit``."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
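# Added worked example (not part of the original script): 220 and 284 form the
# classic amicable pair, so both appear in the sum for any limit above 284.
if __name__ == "__main__":
    assert sum_of_divisors(220) == 284
    assert sum_of_divisors(284) == 220
    assert solution(300) == 220 + 284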
| 366
|
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any]=7 , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : List[Any]=1_8 , lowerCAmelCase__ : str=3_0 , lowerCAmelCase__ : str=4_0_0 , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[Any]=None , ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = size if size is not None else {"height": 2_0, "width": 2_0}
_UpperCAmelCase : Optional[Any] = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : str = num_channels
_UpperCAmelCase : Optional[Any] = image_size
_UpperCAmelCase : Dict = min_resolution
_UpperCAmelCase : str = max_resolution
_UpperCAmelCase : List[Any] = size
_UpperCAmelCase : Union[str, Any] = do_normalize
_UpperCAmelCase : Optional[Any] = do_convert_rgb
_UpperCAmelCase : str = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
_UpperCAmelCase : str = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6}
def _lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def _lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
_UpperCAmelCase : Dict = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
_UpperCAmelCase : Optional[Any] = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class PixaStructImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None
def _lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Tuple = PixaStructImageProcessingTester(self )
@property
def _lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_convert_rgb" ) )
def _lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = self.image_processor_tester.prepare_dummy_image()
_UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
_UpperCAmelCase : str = 2_0_4_8
_UpperCAmelCase : Any = image_processor(lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def _lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
_UpperCAmelCase : List[str] = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_UpperCAmelCase : Union[str, Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : str = image_processor(
lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
_UpperCAmelCase : Union[str, Any] = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
_UpperCAmelCase : str = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(lowerCAmelCase__ ):
_UpperCAmelCase : str = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
_UpperCAmelCase : Any = "Hello"
_UpperCAmelCase : Optional[int] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ , header_text=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : List[Any] = image_processor(
lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ , header_text=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
_UpperCAmelCase : Any = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_UpperCAmelCase : int = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : Union[str, Any] = image_processor(
lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
_UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
_UpperCAmelCase : List[str] = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_UpperCAmelCase : Union[str, Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : str = image_processor(
lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None
def _lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Any = PixaStructImageProcessingTester(self , num_channels=4 )
_UpperCAmelCase : List[Any] = 3
@property
def _lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
_UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_convert_rgb" ) )
def _lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
_UpperCAmelCase : str = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_UpperCAmelCase : Any = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : Tuple = image_processor(
lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
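# Added illustration (not part of the original tests): where the
# expected_hidden_dim used above comes from. Each flattened patch stores
# patch_height * patch_width * num_channels pixel values plus 2 extra entries
# for the patch's row and column index.
if __name__ == "__main__":
    demo_patch_size = {"height": 1_6, "width": 1_6}
    demo_num_channels = 3
    print(demo_patch_size["height"] * demo_patch_size["width"] * demo_num_channels + 2)  # 770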
| 17
| 0
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig( PretrainedConfig ):
'''simple docstring'''
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self: str ,lowerCamelCase_: List[Any]=50265 ,lowerCamelCase_: Any=512 ,lowerCamelCase_: Any=8 ,lowerCamelCase_: str=2048 ,lowerCamelCase_: Any=16 ,lowerCamelCase_: List[Any]=8 ,lowerCamelCase_: Union[str, Any]=2048 ,lowerCamelCase_: List[Any]=16 ,lowerCamelCase_: Tuple=0.0 ,lowerCamelCase_: Dict=0.0 ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Any=True ,lowerCamelCase_: Tuple="gelu" ,lowerCamelCase_: Any=512 ,lowerCamelCase_: Optional[int]=0.1 ,lowerCamelCase_: Any=0.0 ,lowerCamelCase_: Dict=0.0 ,lowerCamelCase_: List[Any]=0.0_2 ,lowerCamelCase_: str=1 ,lowerCamelCase_: str=False ,lowerCamelCase_: int=0 ,lowerCamelCase_: Any=1 ,lowerCamelCase_: int=2 ,lowerCamelCase_: Dict=2 ,**lowerCamelCase_: Optional[Any] ,) -> Any:
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : List[Any] = max_position_embeddings
UpperCAmelCase_ : Dict = d_model
UpperCAmelCase_ : Optional[int] = encoder_ffn_dim
UpperCAmelCase_ : str = encoder_layers
UpperCAmelCase_ : List[Any] = encoder_attention_heads
UpperCAmelCase_ : Tuple = decoder_ffn_dim
UpperCAmelCase_ : Optional[int] = decoder_layers
UpperCAmelCase_ : List[Any] = decoder_attention_heads
UpperCAmelCase_ : str = dropout
UpperCAmelCase_ : Dict = attention_dropout
UpperCAmelCase_ : int = activation_dropout
UpperCAmelCase_ : Optional[Any] = activation_function
UpperCAmelCase_ : int = init_std
UpperCAmelCase_ : int = encoder_layerdrop
UpperCAmelCase_ : List[Any] = decoder_layerdrop
UpperCAmelCase_ : Union[str, Any] = use_cache
UpperCAmelCase_ : List[Any] = encoder_layers
UpperCAmelCase_ : str = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase_ ,bos_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,is_encoder_decoder=lowerCamelCase_ ,decoder_start_token_id=lowerCamelCase_ ,forced_eos_token_id=lowerCamelCase_ ,**lowerCamelCase_ ,)
class BlenderbotSmallOnnxConfig( OnnxSeqaSeqConfigWithPast ):
'''simple docstring'''
@property
def A__ ( self: int ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ : str = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
UpperCAmelCase_ : Union[str, Any] = {0: """batch"""}
UpperCAmelCase_ : Tuple = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
UpperCAmelCase_ : Union[str, Any] = {0: """batch""", 1: """decoder_sequence"""}
UpperCAmelCase_ : int = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_ ,direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCAmelCase_ : List[Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.num_layers
for i in range(lowerCamelCase_ ):
UpperCAmelCase_ : Any = {0: """batch""", 2: """past_sequence + sequence"""}
UpperCAmelCase_ : Dict = {0: """batch""", 2: """past_sequence + sequence"""}
else:
UpperCAmelCase_ : Any = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def A__ ( self: Tuple ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ : Dict = super().outputs
else:
UpperCAmelCase_ : Union[str, Any] = super(lowerCamelCase_ ,self ).outputs
if self.use_past:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.num_layers
for i in range(lowerCamelCase_ ):
UpperCAmelCase_ : Union[str, Any] = {0: """batch""", 2: """past_sequence + sequence"""}
UpperCAmelCase_ : Optional[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def A__ ( self: List[Any] ,lowerCamelCase_: PreTrainedTokenizer ,lowerCamelCase_: int = -1 ,lowerCamelCase_: int = -1 ,lowerCamelCase_: bool = False ,lowerCamelCase_: Optional[TensorType] = None ,) -> Mapping[str, Any]:
UpperCAmelCase_ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# Generate decoder inputs
UpperCAmelCase_ : Union[str, Any] = seq_length if not self.use_past else 1
UpperCAmelCase_ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
UpperCAmelCase_ : Tuple = dict(**lowerCamelCase_ ,**lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
UpperCAmelCase_ , UpperCAmelCase_ : int = common_inputs["""input_ids"""].shape
UpperCAmelCase_ : Union[str, Any] = common_inputs["""decoder_input_ids"""].shape[1]
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.num_attention_heads
UpperCAmelCase_ : List[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase_ : List[str] = decoder_seq_length + 3
UpperCAmelCase_ : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCAmelCase_ : Union[str, Any] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(lowerCamelCase_ ,lowerCamelCase_ )] ,dim=1 )
UpperCAmelCase_ : List[str] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.num_layers
UpperCAmelCase_ : Dict = min(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = max(lowerCamelCase_ ,lowerCamelCase_ ) - min_num_layers
UpperCAmelCase_ : str = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(lowerCamelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
) )
# TODO: test this.
UpperCAmelCase_ : Any = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(lowerCamelCase_ ,lowerCamelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) )
return common_inputs
def A__ ( self: Optional[int] ,lowerCamelCase_: PreTrainedTokenizer ,lowerCamelCase_: int = -1 ,lowerCamelCase_: int = -1 ,lowerCamelCase_: bool = False ,lowerCamelCase_: Optional[TensorType] = None ,) -> Mapping[str, Any]:
UpperCAmelCase_ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
UpperCAmelCase_ , UpperCAmelCase_ : Dict = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
UpperCAmelCase_ : Union[str, Any] = seqlen + 2
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.num_layers
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.num_attention_heads
UpperCAmelCase_ : str = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase_ : Optional[Any] = common_inputs["""attention_mask"""].dtype
UpperCAmelCase_ : Union[str, Any] = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(lowerCamelCase_ ,lowerCamelCase_ ,dtype=lowerCamelCase_ )] ,dim=1 )
UpperCAmelCase_ : Optional[int] = [
(torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) for _ in range(lowerCamelCase_ )
]
return common_inputs
def A__ ( self: str ,lowerCamelCase_: PreTrainedTokenizer ,lowerCamelCase_: int = -1 ,lowerCamelCase_: int = -1 ,lowerCamelCase_: bool = False ,lowerCamelCase_: Optional[TensorType] = None ,) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase_ : List[str] = compute_effective_axis_dimension(
lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase_ : Union[str, Any] = tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = compute_effective_axis_dimension(
lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase_ : Dict = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCAmelCase_ : List[str] = dict(tokenizer(lowerCamelCase_ ,return_tensors=lowerCamelCase_ ) )
return common_inputs
def A__ ( self: Optional[Any] ,lowerCamelCase_: PreTrainedTokenizer ,lowerCamelCase_: int = -1 ,lowerCamelCase_: int = -1 ,lowerCamelCase_: bool = False ,lowerCamelCase_: Optional[TensorType] = None ,) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase_ ,batch_size=lowerCamelCase_ ,seq_length=lowerCamelCase_ ,is_pair=lowerCamelCase_ ,framework=lowerCamelCase_ )
elif self.task == "causal-lm":
UpperCAmelCase_ : Optional[int] = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase_ ,batch_size=lowerCamelCase_ ,seq_length=lowerCamelCase_ ,is_pair=lowerCamelCase_ ,framework=lowerCamelCase_ )
else:
UpperCAmelCase_ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ ,batch_size=lowerCamelCase_ ,seq_length=lowerCamelCase_ ,is_pair=lowerCamelCase_ ,framework=lowerCamelCase_ )
return common_inputs
def A__ ( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any] ) -> Dict:
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ : List[str] = super()._flatten_past_key_values_(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
else:
UpperCAmelCase_ : Optional[Any] = super(lowerCamelCase_ ,self )._flatten_past_key_values_(
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
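# Added illustration (not part of the original config): the shape convention
# used above when zero-filling each past_key_values tensor, with example sizes.
if __name__ == "__main__":
    import torch

    demo_batch, demo_heads, demo_past_len, demo_d_model = 2, 16, 8, 512
    # (batch, num_attention_heads, past_sequence_length, head_dim)
    demo_past_key = torch.zeros(demo_batch, demo_heads, demo_past_len, demo_d_model // demo_heads)
    print(demo_past_key.shape)  # torch.Size([2, 16, 8, 32])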
| 345
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class _snake_case ( nn.Module ):
'''simple docstring'''
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Optional[int] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : List[Any] = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : List[Any] = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : int = resnets
UpperCAmelCase_ : Tuple = attentions
if self.add_downsample:
UpperCAmelCase_ : List[Any] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: int=True ) -> int:
UpperCAmelCase_ : List[Any] = ()
for resnet, attn in zip(self.resnets ,self.attentions ):
UpperCAmelCase_ : str = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[Any] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class _snake_case ( nn.Module ):
'''simple docstring'''
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32
def A__ ( self: Dict ) -> int:
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : Dict = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnets
if self.add_downsample:
UpperCAmelCase_ : List[str] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any]=True ) -> Any:
UpperCAmelCase_ : Union[str, Any] = ()
for resnet in self.resnets:
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[str] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class _snake_case ( nn.Module ):
'''simple docstring'''
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
def A__ ( self: str ) -> Any:
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : int = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Optional[Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : int = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = resnets
UpperCAmelCase_ : Dict = attentions
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[int] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: Any ,lowerCamelCase_: str ,lowerCamelCase_: List[str]=True ) -> List[str]:
for resnet, attn in zip(self.resnets ,self.attentions ):
# pop res hidden states
UpperCAmelCase_ : List[str] = res_hidden_states_tuple[-1]
UpperCAmelCase_ : Union[str, Any] = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : Optional[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Dict = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class _snake_case ( nn.Module ):
'''simple docstring'''
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32
def A__ ( self: Dict ) -> Dict:
UpperCAmelCase_ : Any = []
for i in range(self.num_layers ):
UpperCAmelCase_ : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : str = resnets
if self.add_upsample:
UpperCAmelCase_ : Union[str, Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Dict ,lowerCamelCase_: Dict ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any=True ) -> List[str]:
for resnet in self.resnets:
# pop res hidden states
UpperCAmelCase_ : Dict = res_hidden_states_tuple[-1]
UpperCAmelCase_ : str = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : List[str] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class _snake_case ( nn.Module ):
'''simple docstring'''
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
def A__ ( self: Dict ) -> List[str]:
# there is always at least one resnet
UpperCAmelCase_ : List[Any] = [
FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
]
UpperCAmelCase_ : Any = []
for _ in range(self.num_layers ):
UpperCAmelCase_ : Optional[Any] = FlaxTransformeraDModel(
in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Dict = resnets
UpperCAmelCase_ : Any = attentions
def __call__( self: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any]=True ) -> List[Any]:
UpperCAmelCase_ : List[Any] = self.resnets[0](lowerCamelCase_ ,lowerCamelCase_ )
for attn, resnet in zip(self.attentions ,self.resnets[1:] ):
UpperCAmelCase_ : Optional[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
return hidden_states
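# Added illustration (not part of the original module): the skip-connection
# pattern used by the up blocks above: pop the matching down-block activation
# and concatenate along the channel axis (the last axis in Flax's NHWC layout).
if __name__ == "__main__":
    demo_hidden = jnp.zeros((1, 8, 8, 320))
    demo_skip = jnp.zeros((1, 8, 8, 640))
    print(jnp.concatenate((demo_hidden, demo_skip), axis=-1).shape)  # (1, 8, 8, 960)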
| 345
| 1
|
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def split_text(text: str, n: int = 1_00, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split each document into passages of roughly 100 words."""
    titles, texts = [], []
    for title, text in zip(documents['''title'''], documents['''text''']):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else '''''')
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents['''title'''], documents['''text'''], truncation=True, padding='''longest''', return_tensors='''pt'''
    )['''input_ids''']
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main( rag_example_args: "RagExampleArguments" , processing_args: "ProcessingArguments" , index_hnsw_args: "IndexHnswArguments" , ) -> None:
'''simple docstring'''
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        '''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents , batched=True , num_proc=processing_args.num_proc )
# And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=device )
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
    new_features = Features(
        {'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed , ctx_encoder=ctx_encoder , ctx_tokenizer=ctx_tokenizer ) , batched=True , batch_size=processing_args.batch_size , features=new_features , )
# And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' )
    dataset.save_to_disk(passages_path )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index('''embeddings''' , custom_index=index )
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' )
    dataset.get_index('''embeddings''' ).save(index_path )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / '''test_run''' / '''dummy-kb''' / '''my_knowledge_dataset.csv''' ) , metadata={'''help''': '''Path to a tab-separated csv file with columns \'title\' and \'text\''''} , )
    question: Optional[str] = field(
        default=None , metadata={'''help''': '''Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'''} , )
    rag_model_name: str = field(
        default='''facebook/rag-sequence-nq''' , metadata={'''help''': '''The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''''} , )
    dpr_ctx_encoder_model_name: str = field(
        default='''facebook/dpr-ctx_encoder-multiset-base''' , metadata={
            '''help''': (
                '''The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'''
                ''' \'facebook/dpr-ctx_encoder-multiset-base\''''
            )
        } , )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / '''test_run''' / '''dummy-kb''' ) , metadata={'''help''': '''Path to a directory where the dataset passages and the index will be saved'''} , )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None , metadata={
            '''help''': '''The number of processes to use to split the documents into passages. Default is single process.'''
        } , )
    batch_size: int = field(
        default=1_6 , metadata={
            '''help''': '''The batch size to use when computing the passages embeddings using the DPR context encoder.'''
        } , )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=7_6_8 , metadata={'''help''': '''The dimension of the embeddings to pass to the HNSW Faiss index.'''} , )
    m: int = field(
        default=1_2_8 , metadata={
            '''help''': (
                '''The number of bi-directional links created for every new element during the HNSW index construction.'''
            )
        } , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args , processing_args , index_hnsw_args = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 116
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCAmelCase_ = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
lowerCAmelCase_ = {
'moussaKam/mbarthez': 10_24,
'moussaKam/barthez': 10_24,
'moussaKam/barthez-orangesum-title': 10_24,
}
lowerCAmelCase_ = '▁'
class BarthezTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self : Any , _A : Optional[int] , _A : List[str]="<s>" , _A : Tuple="</s>" , _A : Dict="</s>" , _A : Dict="<s>" , _A : List[str]="<unk>" , _A : str="<pad>" , _A : Any="<mask>" , _A : Optional[Dict[str, Any]] = None , **_A : Union[str, Any] , ) -> None:
"""simple docstring"""
lowercase : List[str] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
lowercase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
lowercase : Any = vocab_file
lowercase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_A ) )
lowercase : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
lowercase : Tuple = len(self.sp_model ) - 1
lowercase : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __a ( self : Tuple , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase : Any = [self.cls_token_id]
lowercase : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __a ( self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def __a ( self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase : List[str] = [self.sep_token_id]
lowercase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __a ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return len(self.sp_model )
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
lowercase : int = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __a ( self : Union[str, Any] , _A : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_A , out_type=_A )
def __a ( self : Optional[int] , _A : str ) -> Dict:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase : Optional[int] = self.sp_model.PieceToId(_A )
return spm_id if spm_id else self.unk_token_id
def __a ( self : Any , _A : List[str] ) -> List[str]:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(_A )
def __a ( self : Any , _A : Tuple ) -> Tuple:
"""simple docstring"""
lowercase : Dict = []
lowercase : Any = ''''''
lowercase : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_A ) + token
lowercase : int = True
lowercase : Optional[Any] = []
else:
current_sub_tokens.append(_A )
lowercase : List[str] = False
out_string += self.sp_model.decode(_A )
return out_string.strip()
def __getstate__( self : int ) -> Optional[Any]:
"""simple docstring"""
lowercase : str = self.__dict__.copy()
lowercase : Any = None
return state
def __setstate__( self : Optional[int] , _A : Tuple ) -> str:
"""simple docstring"""
lowercase : List[str] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase : Union[str, Any] = {}
lowercase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __a ( self : str , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase : Any = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _A )
elif not os.path.isfile(self.vocab_file ):
with open(_A , '''wb''' ) as fi:
lowercase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
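# Added usage sketch (not part of the original file); loading the checkpoint
# downloads it, so this is illustrative only.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    demo_tok = AutoTokenizer.from_pretrained("moussaKam/barthez")
    demo_ids = demo_tok("Bonjour le monde")["input_ids"]
    print(demo_tok.convert_ids_to_tokens(demo_ids))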
| 116
| 1
|
import sys
from collections import defaultdict
class Heap:
    """Min-heap keyed by distance that also tracks each vertex's position,
    allowing an O(log n) decrease-key for Prim's algorithm."""

    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm( adjacency_list ) -> list:
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # neighboring tree vertex of each selected vertex
    # minimum distance from each explored vertex to the neighboring vertices of the
    # partial tree formed so far in the graph
    distance_tv = []  # heap of distances of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[neighbor] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
    # < --------- Prim's Algorithm --------- >
    edges_number = int(input('''Enter number of edges: ''').strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
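# Cross-check sketch using the standard-library heap (self-contained; the
# (weight, parent, child) tuple layout and the tiny example graph below are
# assumptions for illustration, not part of the original file):
#
#   import heapq
#
#   def prim_mst_sketch(adjacency, start=0):
#       visited = {start}
#       frontier = [(w, start, v) for v, w in adjacency[start]]
#       heapq.heapify(frontier)
#       mst = []
#       while frontier and len(visited) < len(adjacency):
#           w, u, v = heapq.heappop(frontier)
#           if v not in visited:
#               visited.add(v)
#               mst.append((u, v))
#               for nxt, nw in adjacency[v]:
#                   if nxt not in visited:
#                       heapq.heappush(frontier, (nw, v, nxt))
#       return mst
#
#   prim_mst_sketch({0: [(1, 1), (2, 4)], 1: [(0, 1), (2, 2)], 2: [(0, 4), (1, 2)]})
#   # -> [(0, 1), (1, 2)], the same MST edges as prisms_algorithm on these edges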
| 338
|
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class A__ :
def __init__( self :Tuple , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Optional[int]=1_3 , SCREAMING_SNAKE_CASE :Optional[int]=7 , SCREAMING_SNAKE_CASE :Tuple=False , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Optional[int]=False , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :List[str]=3_3 , SCREAMING_SNAKE_CASE :Tuple=3_2 , SCREAMING_SNAKE_CASE :Tuple=5 , SCREAMING_SNAKE_CASE :int=4 , SCREAMING_SNAKE_CASE :Union[str, Any]=3_7 , SCREAMING_SNAKE_CASE :List[str]="gelu" , SCREAMING_SNAKE_CASE :Optional[Any]=0.1 , SCREAMING_SNAKE_CASE :Tuple=0.1 , SCREAMING_SNAKE_CASE :str=5_1_2 , SCREAMING_SNAKE_CASE :Dict=1_6 , SCREAMING_SNAKE_CASE :Dict=2 , SCREAMING_SNAKE_CASE :Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE :str=3 , SCREAMING_SNAKE_CASE :List[str]=4 , SCREAMING_SNAKE_CASE :List[str]=None , ) -> Union[str, Any]:
'''simple docstring'''
_a : Union[str, Any] =parent
_a : List[Any] =batch_size
_a : Optional[int] =seq_length
_a : Union[str, Any] =is_training
_a : List[Any] =use_input_mask
_a : Optional[int] =use_token_type_ids
_a : int =use_labels
_a : List[str] =vocab_size
_a : List[Any] =hidden_size
_a : int =num_hidden_layers
_a : Tuple =num_attention_heads
_a : Any =intermediate_size
_a : str =hidden_act
_a : Union[str, Any] =hidden_dropout_prob
_a : Union[str, Any] =attention_probs_dropout_prob
_a : str =max_position_embeddings
_a : Dict =type_vocab_size
_a : Tuple =type_sequence_label_size
_a : Dict =initializer_range
_a : List[str] =num_labels
_a : Tuple =num_choices
_a : int =scope
def __UpperCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
_a : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : List[Any] =None
if self.use_input_mask:
_a : Any =random_attention_mask([self.batch_size, self.seq_length] )
_a : Optional[int] =None
_a : str =None
_a : Dict =None
if self.use_labels:
_a : Dict =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a : List[str] =ids_tensor([self.batch_size] , self.num_choices )
_a : List[Any] =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :int ) -> Tuple:
'''simple docstring'''
_a : Any =EsmModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
_a : Optional[Any] =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =model(SCREAMING_SNAKE_CASE )
_a : str =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict:
'''simple docstring'''
_a : str =EsmForMaskedLM(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
_a : Union[str, Any] =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_a : int =self.num_labels
_a : Tuple =EsmForTokenClassification(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
_a : Tuple =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class A__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
__UpperCamelCase : Any = False
__UpperCamelCase : Any = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCamelCase : str = ()
__UpperCamelCase : List[str] = (
{
"feature-extraction": EsmModel,
"fill-mask": EsmForMaskedLM,
"text-classification": EsmForSequenceClassification,
"token-classification": EsmForTokenClassification,
"zero-shot": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase : Union[str, Any] = True
def __UpperCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
_a : Dict =EsmModelTester(self )
_a : Optional[Any] =ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCAmelCase ( self :Tuple ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self :Optional[int] ) -> str:
'''simple docstring'''
_a : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :List[Any] ) -> Dict:
'''simple docstring'''
_a : List[str] =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a : Dict =type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
_a : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
_a : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE )
@slow
def __UpperCAmelCase ( self :str ) -> Dict:
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Union[str, Any] =EsmModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
_a : Optional[Any] =self.model_tester.prepare_config_and_inputs()[0]
_a : Dict =EsmEmbeddings(config=SCREAMING_SNAKE_CASE )
_a : Tuple =torch.as_tensor([[1_2, 3_1, 1_3, model.padding_idx]] )
_a : Optional[Any] =torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
_a : Any =create_position_ids_from_input_ids(SCREAMING_SNAKE_CASE , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) )
def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
_a : List[Any] =self.model_tester.prepare_config_and_inputs()[0]
_a : Optional[int] =EsmEmbeddings(config=SCREAMING_SNAKE_CASE )
_a : Tuple =torch.empty(2 , 4 , 3_0 )
_a : str =[
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
_a : int =torch.as_tensor([expected_single_positions, expected_single_positions] )
_a : Any =embeddings.create_position_ids_from_inputs_embeds(SCREAMING_SNAKE_CASE )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def __UpperCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def __UpperCAmelCase ( self :str ) -> Any:
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __UpperCAmelCase ( self :Dict ) -> Any:
'''simple docstring'''
pass
@require_torch
class A__ ( UpperCAmelCase__ ):
@slow
def __UpperCAmelCase ( self :List[Any] ) -> str:
'''simple docstring'''
with torch.no_grad():
_a : Optional[int] =EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
_a : Any =torch.tensor([[0, 1, 2, 3, 4, 5]] )
_a : Tuple =model(SCREAMING_SNAKE_CASE )[0]
_a : int =3_3
_a : Tuple =torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =torch.tensor(
[[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@slow
def __UpperCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
_a : Any =EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
_a : Any =torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
_a : int =model(SCREAMING_SNAKE_CASE )[0]
# compare the actual values for a slice.
_a : str =torch.tensor(
[[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
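# The position-id tests above encode a simple rule: positions count only
# non-padding tokens and start right after padding_idx. A standalone sketch of
# that rule (an illustrative re-derivation, not necessarily the library's
# exact helper):
#
#   def position_ids_sketch(input_ids, padding_idx):
#       mask = input_ids.ne(padding_idx).int()
#       return (torch.cumsum(mask, dim=1) * mask + padding_idx).long()
#
#   position_ids_sketch(torch.tensor([[12, 31, 13, 1]]), padding_idx=1)
#   # -> tensor([[2, 3, 4, 1]]); the padded slot keeps the padding index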
| 276
| 0
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize( example :Optional[Any] ) -> Union[str, Any]:
    output = {}
    output['input_ids'] = tokenizer(example['content'] , truncation=False )['input_ids']
    output['ratio_char_token'] = len(example['content'] ) / len(output['input_ids'] )
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
    tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
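# The ratio_char_token column computed above is characters per token, a quick
# compression metric for a tokenizer on source code. A standalone sketch of
# the same measurement (the 'gpt2' checkpoint is an assumption for
# illustration):
#
#   from transformers import AutoTokenizer
#
#   tok = AutoTokenizer.from_pretrained('gpt2')
#   text = 'def add(a, b):\n    return a + b\n'
#   ids = tok(text, truncation=False)['input_ids']
#   print(len(text) / len(ids))  # higher means fewer tokens per character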
| 361
|
"""simple docstring"""
UpperCamelCase : Union[str, Any] = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs( graph , s , t , parent ) -> bool:
    # Return True if the sink t is reachable from the source s in the residual graph.
    visited = [False] * len(graph )
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut( graph , source , sink ) -> list:
    parent = [-1] * (len(graph ))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record the original capacities (copy).
    while bfs(graph , source , sink , parent ):
        path_flow = float('Inf' )
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
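# By the max-flow/min-cut theorem, the capacities of the returned cut edges
# should sum to the maximum flow. A compact Edmonds-Karp sketch to cross-check
# the value (for the CLRS-style test_graph above, the expected flow is 23):
#
#   from collections import deque
#
#   def max_flow_sketch(capacity, source, sink):
#       residual = [row[:] for row in capacity]
#       flow = 0
#       while True:
#           parent = [-1] * len(capacity)
#           parent[source] = source
#           q = deque([source])
#           while q and parent[sink] == -1:
#               u = q.popleft()
#               for v, cap in enumerate(residual[u]):
#                   if parent[v] == -1 and cap > 0:
#                       parent[v] = u
#                       q.append(v)
#           if parent[sink] == -1:
#               return flow
#           bottleneck, v = float('inf'), sink
#           while v != source:
#               bottleneck = min(bottleneck, residual[parent[v]][v])
#               v = parent[v]
#           v = sink
#           while v != source:
#               residual[parent[v]][v] -= bottleneck
#               residual[v][parent[v]] += bottleneck
#               v = parent[v]
#           flow += bottleneck
#
#   max_flow_sketch(test_graph, source=0, sink=5)  # -> 23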
| 263
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Any = logging.get_logger(__name__)
lowerCAmelCase__ : List[Any] = {
'tanreinama/GPTSAN-2.8B-spout_is_uniform': (
'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'
),
}
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = "gptsan-japanese"
snake_case__ = [
"past_key_values",
]
snake_case__ = {
"hidden_size": "d_model",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Tuple ,lowerCamelCase__ : List[Any]=36_000 ,lowerCamelCase__ : Optional[int]=1_280 ,lowerCamelCase__ : int=1_024 ,lowerCamelCase__ : List[str]=8_192 ,lowerCamelCase__ : List[str]=4_096 ,lowerCamelCase__ : Tuple=128 ,lowerCamelCase__ : List[str]=10 ,lowerCamelCase__ : int=0 ,lowerCamelCase__ : Optional[int]=16 ,lowerCamelCase__ : Any=16 ,lowerCamelCase__ : int=128 ,lowerCamelCase__ : int=0.0 ,lowerCamelCase__ : List[str]=1e-5 ,lowerCamelCase__ : Union[str, Any]=False ,lowerCamelCase__ : Optional[int]=0.0 ,lowerCamelCase__ : int="float32" ,lowerCamelCase__ : int=False ,lowerCamelCase__ : List[Any]=False ,lowerCamelCase__ : Any=False ,lowerCamelCase__ : List[Any]=0.0_0_2 ,lowerCamelCase__ : str=False ,lowerCamelCase__ : Tuple=True ,lowerCamelCase__ : Dict=35_998 ,lowerCamelCase__ : Optional[Any]=35_995 ,lowerCamelCase__ : Any=35_999 ,**lowerCamelCase__ : Optional[int] ,):
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = d_model
UpperCAmelCase__ = d_ff
UpperCAmelCase__ = d_ext
UpperCAmelCase__ = d_spout
UpperCAmelCase__ = num_switch_layers
UpperCAmelCase__ = num_ext_layers
UpperCAmelCase__ = num_switch_layers + num_ext_layers
UpperCAmelCase__ = num_heads
UpperCAmelCase__ = num_experts
UpperCAmelCase__ = expert_capacity
UpperCAmelCase__ = dropout_rate
UpperCAmelCase__ = layer_norm_epsilon
UpperCAmelCase__ = router_bias
UpperCAmelCase__ = router_jitter_noise
UpperCAmelCase__ = router_dtype
UpperCAmelCase__ = router_ignore_padding_tokens
UpperCAmelCase__ = output_hidden_states
UpperCAmelCase__ = output_attentions
UpperCAmelCase__ = initializer_factor
UpperCAmelCase__ = output_router_logits
UpperCAmelCase__ = use_cache
super().__init__(
separator_token_id=lowerCamelCase__ ,pad_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,**lowerCamelCase__ ,)
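# Usage sketch (the released class name in transformers is
# GPTSanJapaneseConfig; treating that mapping as an assumption here):
#
#   from transformers import GPTSanJapaneseConfig
#
#   config = GPTSanJapaneseConfig(num_switch_layers=8, num_ext_layers=2)
#   config.num_layers  # -> 10, the sum computed in __init__ above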
| 98
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : Any ) -> List[str]:
'''simple docstring'''
snake_case : int = tempfile.mkdtemp()
# fmt: off
snake_case : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
# fmt: on
snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
snake_case : int = {
"do_resize": True,
"size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.5, 0.5, 0.5],
"image_std": [0.5, 0.5, 0.5],
}
snake_case : Optional[Any] = os.path.join(self.tmpdirname , snake_case__ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , **snake_case__ : str ) -> Optional[int]:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , **snake_case__ : List[str] ) -> int:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Dict:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> str:
'''simple docstring'''
snake_case : List[Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
snake_case : Optional[int] = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = self.get_tokenizer()
snake_case : Optional[Any] = self.get_image_processor()
snake_case : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
processor.save_pretrained(self.tmpdirname )
snake_case : Any = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[Any]:
'''simple docstring'''
snake_case : str = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case : Optional[int] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
snake_case : Tuple = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
snake_case : List[str] = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> int:
'''simple docstring'''
snake_case : str = self.get_image_processor()
snake_case : Optional[int] = self.get_tokenizer()
snake_case : List[Any] = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : Optional[Any] = self.prepare_image_inputs()
snake_case : str = image_processor(snake_case__ , return_tensors="np" )
snake_case : Any = processor(images=snake_case__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[Any]:
'''simple docstring'''
snake_case : Dict = self.get_image_processor()
snake_case : int = self.get_tokenizer()
snake_case : Any = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : Tuple = "lower newer"
snake_case : Tuple = processor(text=snake_case__ )
snake_case : Union[str, Any] = tokenizer(snake_case__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Optional[int]:
'''simple docstring'''
snake_case : List[Any] = self.get_image_processor()
snake_case : Dict = self.get_tokenizer()
snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : int = "lower newer"
snake_case : Dict = self.prepare_image_inputs()
snake_case : Union[str, Any] = processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with self.assertRaises(snake_case__ ):
processor()
def _SCREAMING_SNAKE_CASE (self : str ) -> Tuple:
'''simple docstring'''
snake_case : Tuple = self.get_image_processor()
snake_case : Optional[Any] = self.get_tokenizer()
snake_case : Tuple = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case : List[Any] = processor.batch_decode(snake_case__ )
snake_case : Union[str, Any] = tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case : str = self.get_image_processor()
snake_case : Union[str, Any] = self.get_tokenizer()
snake_case : Any = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : Optional[Any] = "lower newer"
snake_case : List[Any] = self.prepare_image_inputs()
snake_case : Tuple = processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
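# The processor under test simply routes text to the tokenizer and images to
# the image processor. A minimal usage sketch mirroring the assertions above
# (tokenizer, image_processor and image are assumed to be built as in setUp):
#
#   processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
#   batch = processor(text='lower newer', images=image, return_tensors='np')
#   sorted(batch.keys())
#   # -> ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']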
| 59
| 0
|
__UpperCAmelCase = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__UpperCAmelCase = [{'type': 'code', 'content': INSTALL_CONTENT}]
__UpperCAmelCase = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 145
|
def ugly_numbers( __snake_case : int ):
    '''simple docstring'''
    ugly_nums = [1]
    ia , ib , ic = 0, 0, 0
    next_a = ugly_nums[ia] * 2
    next_b = ugly_nums[ib] * 3
    next_c = ugly_nums[ic] * 5
    for _ in range(1 , __snake_case ):
        next_num = min(next_a , next_b , next_c )
        ugly_nums.append(next_num )
        if next_num == next_a:
            ia += 1
            next_a = ugly_nums[ia] * 2
        if next_num == next_b:
            ib += 1
            next_b = ugly_nums[ib] * 3
        if next_num == next_c:
            ic += 1
            next_c = ugly_nums[ic] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F'{ugly_numbers(200) = }')
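# An alternative sketch of the same sequence using a min-heap and a seen-set
# (self-contained; counts 1 as the first ugly number, like the loop above):
#
#   import heapq
#
#   def ugly_numbers_heap(n):
#       heap, seen = [1], {1}
#       for _ in range(n - 1):
#           smallest = heapq.heappop(heap)
#           for factor in (2, 3, 5):
#               if smallest * factor not in seen:
#                   seen.add(smallest * factor)
#                   heapq.heappush(heap, smallest * factor)
#       return heap[0]
#
#   ugly_numbers_heap(10)  # -> 12 (sequence: 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...)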
| 145
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class lowercase__ ( PipelineTool ):
    default_checkpoint = """openai/whisper-base"""
    description = (
        """This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
        """transcribed text."""
    )
    name = """transcriber"""
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["""audio"""]
    outputs = ["""text"""]
    def encode( self ,audio ):
        '''simple docstring'''
        return self.pre_processor(audio ,return_tensors='pt' ).input_features
    def forward( self ,inputs ):
        '''simple docstring'''
        return self.model.generate(inputs=inputs )
    def decode( self ,outputs ):
        '''simple docstring'''
        return self.pre_processor.batch_decode(outputs ,skip_special_tokens=True )[0]
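# A standalone sketch of the same encode/generate/decode flow with the plain
# transformers API (the checkpoint comes from default_checkpoint above; the
# one-second silent dummy audio and the 16 kHz rate are assumptions for
# illustration):
#
#   import numpy as np
#   from transformers import WhisperForConditionalGeneration, WhisperProcessor
#
#   processor = WhisperProcessor.from_pretrained('openai/whisper-base')
#   model = WhisperForConditionalGeneration.from_pretrained('openai/whisper-base')
#   audio = np.zeros(16_000, dtype=np.float32)
#   features = processor(audio, sampling_rate=16_000, return_tensors='pt').input_features
#   ids = model.generate(inputs=features)
#   text = processor.batch_decode(ids, skip_special_tokens=True)[0]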
| 83
|
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str]) -> Optional[int]:
'''simple docstring'''
if isinstance(UpperCamelCase_, torch.Tensor):
return image
elif isinstance(UpperCamelCase_, PIL.Image.Image):
__lowercase = [image]
if isinstance(image[0], PIL.Image.Image):
__lowercase = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
__lowercase = np.concatenate(UpperCamelCase_, axis=0)
__lowercase = np.array(UpperCamelCase_).astype(np.floataa) / 255.0
__lowercase = image.transpose(0, 3, 1, 2)
__lowercase = 2.0 * image - 1.0
__lowercase = torch.from_numpy(UpperCamelCase_)
elif isinstance(image[0], torch.Tensor):
__lowercase = torch.cat(UpperCamelCase_, dim=0)
return image
def _A ( UpperCamelCase_ : Dict, UpperCamelCase_ : str, UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[Any]=0.9_995) -> int:
'''simple docstring'''
if not isinstance(UpperCamelCase_, np.ndarray):
__lowercase = True
__lowercase = va.device
__lowercase = va.cpu().numpy()
__lowercase = va.cpu().numpy()
__lowercase = np.sum(va * va / (np.linalg.norm(UpperCamelCase_) * np.linalg.norm(UpperCamelCase_)))
if np.abs(UpperCamelCase_) > DOT_THRESHOLD:
__lowercase = (1 - t) * va + t * va
else:
__lowercase = np.arccos(UpperCamelCase_)
__lowercase = np.sin(UpperCamelCase_)
__lowercase = theta_a * t
__lowercase = np.sin(UpperCamelCase_)
__lowercase = np.sin(theta_a - theta_t) / sin_theta_a
__lowercase = sin_theta_t / sin_theta_a
__lowercase = sa * va + sa * va
if inputs_are_torch:
__lowercase = torch.from_numpy(UpperCamelCase_).to(UpperCamelCase_)
return va
def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Union[str, Any]) -> int:
'''simple docstring'''
__lowercase = F.normalize(UpperCamelCase_, dim=-1)
__lowercase = F.normalize(UpperCamelCase_, dim=-1)
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def _A ( UpperCamelCase_ : Optional[int], UpperCamelCase_ : str) -> Optional[int]:
'''simple docstring'''
for param in model.parameters():
__lowercase = value
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
def __init__( self : Dict, UpperCAmelCase__ : AutoencoderKL, UpperCAmelCase__ : CLIPTextModel, UpperCAmelCase__ : CLIPModel, UpperCAmelCase__ : CLIPTokenizer, UpperCAmelCase__ : UNetaDConditionModel, UpperCAmelCase__ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], UpperCAmelCase__ : CLIPFeatureExtractor, UpperCAmelCase__ : Union[str, Any]=None, UpperCAmelCase__ : List[str]=None, UpperCAmelCase__ : Any=None, ):
super().__init__()
self.register_modules(
vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, clip_model=UpperCAmelCase__, tokenizer=UpperCAmelCase__, unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, feature_extractor=UpperCAmelCase__, coca_model=UpperCAmelCase__, coca_tokenizer=UpperCAmelCase__, coca_transform=UpperCAmelCase__, )
__lowercase = (
feature_extractor.size
if isinstance(feature_extractor.size, UpperCAmelCase__ )
else feature_extractor.size["shortest_edge"]
)
__lowercase = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std )
set_requires_grad(self.text_encoder, UpperCAmelCase__ )
set_requires_grad(self.clip_model, UpperCAmelCase__ )
def _lowercase ( self : Tuple, UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase__ )
def _lowercase ( self : int ):
self.enable_attention_slicing(UpperCAmelCase__ )
def _lowercase ( self : str ):
set_requires_grad(self.vae, UpperCAmelCase__ )
def _lowercase ( self : Any ):
set_requires_grad(self.vae, UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ):
set_requires_grad(self.unet, UpperCAmelCase__ )
def _lowercase ( self : Any ):
set_requires_grad(self.unet, UpperCAmelCase__ )
def _lowercase ( self : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[Any] ):
# get the original timestep using init_timestep
__lowercase = min(int(num_inference_steps * strength ), UpperCAmelCase__ )
__lowercase = max(num_inference_steps - init_timestep, 0 )
__lowercase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowercase ( self : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : int=None ):
if not isinstance(UpperCAmelCase__, torch.Tensor ):
raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(UpperCAmelCase__ )}""" )
__lowercase = image.to(device=UpperCAmelCase__, dtype=UpperCAmelCase__ )
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
__lowercase = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCAmelCase__ )
]
__lowercase = torch.cat(UpperCAmelCase__, dim=0 )
else:
__lowercase = self.vae.encode(UpperCAmelCase__ ).latent_dist.sample(UpperCAmelCase__ )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__lowercase = 0.18_215 * init_latents
__lowercase = init_latents.repeat_interleave(UpperCAmelCase__, dim=0 )
__lowercase = randn_tensor(init_latents.shape, generator=UpperCAmelCase__, device=UpperCAmelCase__, dtype=UpperCAmelCase__ )
# get latents
__lowercase = self.scheduler.add_noise(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = init_latents
return latents
def _lowercase ( self : Optional[int], UpperCAmelCase__ : Dict ):
__lowercase = self.coca_transform(UpperCAmelCase__ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__lowercase = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype ) )
__lowercase = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>", "" ).rstrip(" .," )
def _lowercase ( self : Tuple, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Tuple ):
__lowercase = self.feature_extractor.preprocess(UpperCAmelCase__ )
__lowercase = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
__lowercase = self.clip_model.get_image_features(UpperCAmelCase__ )
__lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=UpperCAmelCase__ )
__lowercase = image_embeddings_clip.repeat_interleave(UpperCAmelCase__, dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def _lowercase ( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[int], ):
__lowercase = latents.detach().requires_grad_()
__lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ )
# predict the noise residual
__lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample
if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__lowercase = self.scheduler.alphas_cumprod[timestep]
__lowercase = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise, also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowercase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__lowercase = torch.sqrt(UpperCAmelCase__ )
__lowercase = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler, UpperCAmelCase__ ):
__lowercase = self.scheduler.sigmas[index]
__lowercase = latents - sigma * noise_pred
else:
raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__lowercase = 1 / 0.18_215 * sample
__lowercase = self.vae.decode(UpperCAmelCase__ ).sample
__lowercase = (image / 2 + 0.5).clamp(0, 1 )
__lowercase = transforms.Resize(self.feature_extractor_size )(UpperCAmelCase__ )
__lowercase = self.normalize(UpperCAmelCase__ ).to(latents.dtype )
__lowercase = self.clip_model.get_image_features(UpperCAmelCase__ )
__lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=UpperCAmelCase__ )
__lowercase = spherical_dist_loss(UpperCAmelCase__, UpperCAmelCase__ ).mean() * clip_guidance_scale
__lowercase = -torch.autograd.grad(UpperCAmelCase__, UpperCAmelCase__ )[0]
if isinstance(self.scheduler, UpperCAmelCase__ ):
__lowercase = latents.detach() + grads * (sigma**2)
__lowercase = noise_pred_original
else:
__lowercase = noise_pred_original - torch.sqrt(UpperCAmelCase__ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : str, UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : float = 0.6, UpperCAmelCase__ : Optional[int] = 5_0, UpperCAmelCase__ : Optional[float] = 7.5, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[float] = 1_0_0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : float = 0.8, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, ):
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size:
raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(UpperCAmelCase__ )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(UpperCAmelCase__, torch.Generator ) and batch_size > 1:
__lowercase = [generator] + [None] * (batch_size - 1)
__lowercase = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
__lowercase = [x[0] for x in coca_is_none if x[1]]
__lowercase = ", ".join(UpperCAmelCase__ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(UpperCAmelCase__ ):
raise ValueError(
F"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
__lowercase = self.get_image_description(UpperCAmelCase__ )
if style_prompt is None:
if len(UpperCAmelCase__ ):
raise ValueError(
F"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
__lowercase = self.get_image_description(UpperCAmelCase__ )
# get prompt text embeddings for content and style
__lowercase = self.tokenizer(
UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", )
__lowercase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__lowercase = self.tokenizer(
UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", )
__lowercase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# duplicate text embeddings for each generation per prompt
__lowercase = text_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 )
# set timesteps
__lowercase = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__lowercase = {}
if accepts_offset:
__lowercase = 1
self.scheduler.set_timesteps(UpperCAmelCase__, **UpperCAmelCase__ )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device )
__lowercase ,__lowercase = self.get_timesteps(UpperCAmelCase__, UpperCAmelCase__, self.device )
__lowercase = timesteps[:1].repeat(UpperCAmelCase__ )
# Preprocess image
__lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.prepare_latents(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ )
__lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.prepare_latents(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ )
__lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
if clip_guidance_scale > 0:
__lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = slerp(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
__lowercase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__lowercase = content_text_input.input_ids.shape[-1]
__lowercase = self.tokenizer([""], padding="max_length", max_length=UpperCAmelCase__, return_tensors="pt" )
__lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__lowercase = uncond_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowercase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__lowercase = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__lowercase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device="cpu", dtype=UpperCAmelCase__ ).to(
self.device )
else:
__lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device=self.device, dtype=UpperCAmelCase__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__lowercase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__lowercase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler and will be ignored for other schedulers.
        # eta corresponds to η in the DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
__lowercase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__lowercase = {}
if accepts_eta:
__lowercase = eta
# check if the scheduler accepts generator
__lowercase = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__lowercase = generator
with self.progress_bar(total=UpperCAmelCase__ ):
for i, t in enumerate(UpperCAmelCase__ ):
# expand the latents if we are doing classifier free guidance
__lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ )
# predict the noise residual
__lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__lowercase ,__lowercase = noise_pred.chunk(2 )
__lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__lowercase = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__lowercase ,__lowercase = self.cond_fn(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
# compute the previous noisy sample x_t -> x_t-1
__lowercase = self.scheduler.step(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, **UpperCAmelCase__ ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__lowercase = 1 / 0.18_215 * latents
__lowercase = self.vae.decode(UpperCAmelCase__ ).sample
__lowercase = (image / 2 + 0.5).clamp(0, 1 )
__lowercase = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=UpperCAmelCase__, nsfw_content_detected=UpperCAmelCase__ )
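# The slerp helper above interpolates text/image embeddings along the arc
# between them. A quick numeric check in plain numpy (the orthogonal 2-D
# vectors are an assumption for illustration; the near-parallel lerp fallback
# is omitted):
#
#   import numpy as np
#
#   def slerp_sketch(t, va, vb):
#       theta = np.arccos(np.dot(va, vb) / (np.linalg.norm(va) * np.linalg.norm(vb)))
#       return (np.sin((1 - t) * theta) * va + np.sin(t * theta) * vb) / np.sin(theta)
#
#   slerp_sketch(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0]))
#   # -> array([0.7071..., 0.7071...]); the result stays on the unit circle,
#   # which plain linear interpolation would not.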
| 17
| 0
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowerCamelCase_ ( _lowerCamelCase ):
    factors = prime_factors(_lowerCamelCase )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
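# The function above is the Moebius function mu(n): 0 when a squared prime
# divides n, otherwise (-1) ** (number of distinct prime factors). A
# self-contained trial-division sketch for cross-checking:
#
#   def mobius_sketch(n):
#       result, p = 1, 2
#       while p * p <= n:
#           if n % p == 0:
#               n //= p
#               if n % p == 0:
#                   return 0
#               result = -result
#           p += 1
#       return -result if n > 1 else result
#
#   [mobius_sketch(n) for n in (1, 2, 6, 12, 30)]  # -> [1, -1, 1, 0, -1]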
| 363
|
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    '''simple docstring'''
    def __init__(self, data ):
        '''simple docstring'''
        self.data = data
        self.left = None
        self.right = None
def build_tree():
    print('\n********Press N to stop entering at any point of time********\n' )
    check = input('Enter the value of the root node: ' ).strip().lower()
    q : queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = f'''Enter the left node of {node_found.data}: '''
        check = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = f'''Enter the right node of {node_found.data}: '''
        check = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise
def pre_order( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    print(node.data , end=',' )
    pre_order(node.left )
    pre_order(node.right )
def in_order( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    in_order(node.left )
    print(node.data , end=',' )
    in_order(node.right )
def post_order( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data , end=',' )
def level_order( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    q : queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end=',' )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def level_order_actual( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    q : queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end=',' )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )
def pre_order_iter( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    stack : list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end=',' )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    stack : list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end=',' )
        n = n.right
def post_order_iter( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    stacka , stackb = [], []
    n = node
    stacka.append(n )
    while stacka:  # to find the reversed order of post order, store it in stackb
        n = stacka.pop()
        if n.left:
            stacka.append(n.left )
        if n.right:
            stacka.append(n.right )
        stackb.append(n )
    while stackb:  # pop up from stackb will be the post order
        print(stackb.pop().data , end=',' )
def prompt( s = "" , width=50 , char="*" ):
    if not s:
        return "\n" + width * char
    left , extra = divmod(width - len(s ) - 2 , 2 )
    return f'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
    node: TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
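# Worked example for the traversals above, for the tree
#         1
#        / \
#       2   3
#      / \
#     4   5
# pre order   -> 1,2,4,5,3
# in order    -> 4,2,5,1,3
# post order  -> 4,5,2,3,1
# level order -> 1,2,3,4,5
# the "actual" level order prints one line per depth: 1 / 2,3 / 4,5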
| 316
| 0
|
import warnings
from ..trainer import Trainer
from ..utils import logging
SCREAMING_SNAKE_CASE_:Any = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( Trainer ):
    '''simple docstring'''
    def __init__( self, args=None, **kwargs ):
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""", FutureWarning, )
        super().__init__(args=args, **kwargs )
| 116
|
def add( first , second ) -> int:
    """simple docstring"""
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("""Enter the first number: """).strip())
    second = int(input("""Enter the second number: """).strip())
print(F"""{add(first, second) = }""")
| 116
| 1
|
"""simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
SCREAMING_SNAKE_CASE : int = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=1_6,
),
'''generator''': 2,
},
# 2048-bit
1_4: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=1_6,
),
'''generator''': 2,
},
# 3072-bit
1_5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=1_6,
),
'''generator''': 2,
},
# 4096-bit
1_6: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=1_6,
),
'''generator''': 2,
},
# 6144-bit
1_7: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
            base=16,
),
'''generator''': 2,
},
# 8192-bit
    18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
            base=16,
),
'''generator''': 2,
},
}
from binascii import hexlify
from hashlib import sha256
from os import urandom


class DiffieHellman:
    """Diffie-Hellman key exchange over one of the RFC 3526 MODP groups above."""

    # Current minimum recommendation is a 2048-bit modulus (group 14).
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # Check that the remote public key is valid, following NIST SP 800-56.
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # Check that the remote public key is valid, following NIST SP 800-56.
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
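

# Usage sketch (added for illustration, not part of the original module): two
# parties that pick the same group and exchange public keys derive an identical
# SHA-256 shared secret. Call _demo() by hand to sanity-check an exchange.
def _demo() -> None:
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    alice_secret = alice.generate_shared_key(bob.generate_public_key())
    bob_secret = bob.generate_shared_key(alice.generate_public_key())
    assert alice_secret == bob_secret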
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    __UpperCamelCase = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False, )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dit_256(self):
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")
        words = ["vase", "umbrella", "white shark", "white wolf"]
        class_ids = pipe.get_label_ids(words)
        images = pipe(class_ids, generator=generator, num_inference_steps=40, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
            assert np.abs((expected_image - image).max()) < 1e-2
    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")
        words = ["vase", "umbrella"]
        class_ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(class_ids, generator=generator, num_inference_steps=25, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy" )
            assert np.abs((expected_image - image).max()) < 1e-1
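

# Usage sketch (illustrative, distilled from the slow tests above):
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256").to("cuda")
#   class_ids = pipe.get_label_ids(["white shark"])
#   images = pipe(class_ids, num_inference_steps=40, output_type="np").images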
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(dataset_info, tmp_path):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42 ),
'''v2''': DatasetInfo(dataset_size=1_337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
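

# Illustrative note (not one of the original tests): the YAML round trip above is
# what powers the `dataset_info` block in a dataset card. A README.md header like
#   ---
#   dataset_info:
#     dataset_size: 42
#   ---
# is parsed back into DatasetInfosDict({"default": DatasetInfo(dataset_size=42)}).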
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16 )
        pipe.to("cuda")
        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy", ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy" )
        assert np.abs((expected_image - image).max()) < 5e-1
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
lowerCamelCase ="""__test_patch_submodule_mock__"""
with patch_submodule(_test_patching , """os.path.join""" , _A ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
# Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1, help="The number of minibatches to be ran before gradients are accumulated.", )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD" )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
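

# Typical invocation (illustrative; the script filename is hypothetical, the flags
# are the ones defined in main() above):
#   accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 2 --mixed_precision fp16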
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
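

# Migration sketch (illustrative, not part of the original file): new code should
# construct the base class directly, e.g.
#   trainer = Trainer(args=training_args, model=model)
# which is all this deprecated shim forwards to anyway.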
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples
    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs, [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ], )
@require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10} )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4 )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], )
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ], top_k=2, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ], )
@require_tf
    def test_small_model_tf(self):
        pass
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048, encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ] )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1 )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t )
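

# Usage sketch (illustrative; `tokenizer` is assumed to be a matching
# BlenderbotSmall tokenizer instance):
#   config = BlenderbotSmallConfig()
#   onnx_config = BlenderbotSmallOnnxConfig(config, task="default")
#   dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)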
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
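
# Note (illustrative, not part of the original file): once the lazy module is
# installed in sys.modules, `from transformers.models.clap import ClapModel` only
# triggers the heavy torch-backed import on first attribute access.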
def selection_sort(collection):
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
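

# A quick sanity check (illustrative):
#   >>> selection_sort([3, 1, 2])
#   [1, 2, 3]
# Selection sort makes O(n^2) comparisons but only O(n) swaps, which can matter
# when writes are expensive.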
if __name__ == "__main__":
__snake_case :int = input('''Enter numbers separated by a comma:\n''').strip()
__snake_case :Optional[int] = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    generation_max_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        }, )
    generation_num_beams: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        }, )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None, metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        }, )

    def to_dict(self):
        # GenerationConfig values are not directly JSON-serializable, so they are
        # converted to their own dict representation.
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
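

# Minimal usage sketch (illustrative; "out" is a hypothetical output directory):
#   args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True, generation_num_beams=4)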
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class _A :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=36 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : int = parent
__UpperCAmelCase : Any = batch_size
__UpperCAmelCase : Union[str, Any] = seq_length
__UpperCAmelCase : int = is_training
__UpperCAmelCase : Union[str, Any] = use_input_mask
__UpperCAmelCase : List[str] = use_token_type_ids
__UpperCAmelCase : List[str] = use_labels
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : Union[str, Any] = num_hidden_layers
__UpperCAmelCase : Optional[int] = num_attention_heads
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : Optional[Any] = hidden_dropout_prob
__UpperCAmelCase : List[Any] = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = max_position_embeddings
__UpperCAmelCase : List[Any] = type_vocab_size
__UpperCAmelCase : Dict = type_sequence_label_size
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : Optional[Any] = num_labels
__UpperCAmelCase : Optional[Any] = num_choices
__UpperCAmelCase : int = scope
def __A ( self ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : List[Any] = None
if self.use_input_mask:
__UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Any = None
if self.use_token_type_ids:
__UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Optional[int] = None
if self.use_labels:
__UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ) -> List[str]:
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = self.get_config()
__UpperCAmelCase : List[Any] = 300
return config
def __A ( self ) -> Dict:
'''simple docstring'''
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) : Any = self.prepare_config_and_inputs()
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = MraModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : List[str] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
__UpperCAmelCase : Any = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
__UpperCAmelCase : List[str] = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # tile every input across a new "choice" dimension: (batch, num_choices, seq_len)
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )

    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
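# A minimal, hypothetical usage sketch of the checkpoint exercised above. The tokenizer
# class is an assumption (the 50,265-token vocabulary in the tests suggests a
# RoBERTa-style tokenizer), so this is a sketch, not a verified recipe:
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("uw-madison/mra-base-512-4")
#     model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
#     inputs = tokenizer("Paris is the <mask> of France.", return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     mask_pos = (inputs.input_ids == tokenizer.mask_token_id).nonzero()[0, 1]
#     print(tokenizer.decode(logits[0, mask_pos].argmax()))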
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
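# The integration test above mirrors real Kandinsky 2.2 usage: the prior pipeline maps a
# text prompt to image embeddings, and the decoder pipeline maps those embeddings to
# pixels. A minimal sketch of the same two-stage flow (checkpoints as above, defaults
# elsewhere; a sketch, not a verified recipe):
#
#     prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
#     decoder = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
#     emb, neg_emb = prior("red cat, 4k photo").to_tuple()
#     image = decoder(image_embeds=emb, negative_image_embeds=neg_emb).images[0]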
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs) -> None:
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs,
        )
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
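    # `read()` therefore yields an IterableDataset in streaming mode and a fully
    # materialized (map-style) Dataset otherwise; both can be iterated the same way.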
class JsonDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs) -> None:
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs
    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written
    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)
    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
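# A minimal sketch of how these classes are typically driven (hypothetical file name;
# `Dataset.from_dict` is standard `datasets` API, but this round-trip is illustrative
# only):
#
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
#     JsonDatasetWriter(ds, "out.jsonl").write()      # JSON-lines file out
#     ds2 = JsonDatasetReader("out.jsonl").read()     # round-trip back to a Dataset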
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_multiple_size=4, hidden_act="gelu", hidden_dropout=0.0, attention_dropout=0.1, weight_tying=True, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_multiple_size=self.intermediate_multiple_size, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, weight_tying=self.weight_tying, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
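    # The check above relies on the KV-cache invariant: running the full sequence in one
    # pass and running only the new tokens plus `past_key_values` must produce the same
    # hidden states for the new positions, up to numerical tolerance.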
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"

        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]

        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []

no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}

payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
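# The loop above expects pytest report-log files: one JSON object per line, where a
# failing test produces a line shaped roughly like this (illustrative, not verbatim):
#
#   {"nodeid": "tests/test_foo.py::test_bar", "duration": 0.0123, "outcome": "failed", ...}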
lowercase__ = """"""
lowercase__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
lowercase__ = []
lowercase__ = {}
for test in failed_tests:
lowercase__ = test[0].split("""::""")
lowercase__ = data[0].split("""/""")[-1]
if data[0] not in filesafailed:
lowercase__ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowercase__ = [test[0] for test in failed_table]
lowercase__ = list(set(files))
# Count number of instances in failed_tests
lowercase__ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowercase__ = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
lowercase__ = """Too many failed tests, please see the full report in the Action results."""
lowercase__ = len(err) + 10
lowercase__ = message[: 3000 - offset] + F"\n...\n```\n{err}"
print(F"### {message}")
else:
lowercase__ = """No failed tests! 🤗"""
print(F"## {message}")
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)

    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)

    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]

    for failed_file in all_files2failed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""

            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }

            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
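# Intended to run from a CI workflow directory that already contains the `*.log` report
# files, with SLACK_API_TOKEN, TEST_TYPE, GITHUB_REPOSITORY and GITHUB_RUN_ID exported,
# e.g. (hypothetical step name): `python log_reports.py`.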
'''simple docstring'''
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
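# The fast tests above all reuse one pattern worth noting: a scheduler can be swapped
# into an existing pipeline via `NewScheduler.from_config(pipe.scheduler.config)`, which
# rebuilds the new scheduler from the pipeline's serialized scheduler config, e.g.:
#
#     pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)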
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
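# When the package is installed, this entry point is exposed as the `transformers-cli`
# console script, so the subcommands registered above are invoked from a shell as, e.g.,
# `transformers-cli env` or `transformers-cli download <model-id>`.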
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size

                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class OPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"
        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])
        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
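if __name__ == "__main__":
    # A minimal standalone sketch (not part of the test suite above) of the
    # generation pattern these tests exercise. It assumes the TensorFlow
    # extras of `transformers` are installed and that the "facebook/opt-125m"
    # checkpoint can be downloaded.
    tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-125m")
    model = TFOPTForCausalLM.from_pretrained("facebook/opt-125m")
    prompt_ids = tokenizer("Today is a beautiful day and I want", return_tensors="tf").input_ids
    generated = model.generate(prompt_ids, max_length=10)
    print(tokenizer.batch_decode(generated, skip_special_tokens=True))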
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()
        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            # mps does not support device-specific generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
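if __name__ == "__main__":
    # Hedged usage sketch (not part of the test suite): generate roughly four
    # seconds of audio with the pipeline exercised above, mirroring the slow
    # integration test's arguments. Assumes a CUDA device is available.
    pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k").to("cuda")
    generator = torch.manual_seed(0)
    output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
    print(output.audios.shape)  # (batch, channels, sample_size)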
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__magic_name__ = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__magic_name__ = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
__magic_name__ = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # sacrebleu expects one list per reference position, so transpose the references
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
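if __name__ == "__main__":
    # Usage sketch mirroring Example 1 from the docstring above. Assumes
    # `datasets` and `sacrebleu>=1.4.12` are installed; `load_metric` resolves
    # this metric script by name.
    chrf = datasets.load_metric("chrf")
    predictions = ["The relationship between cats and dogs is not exactly friendly."]
    references = [["The relationship between dogs and cats is not exactly friendly."]]
    print(chrf.compute(predictions=predictions, references=references))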
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
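# Note on the pattern above: `_LazyModule` installs itself in `sys.modules`,
# so the heavy torch-backed classes are only imported on first attribute
# access. Hedged usage sketch (assumes `transformers` with torch installed):
#
#     from transformers import BigBirdPegasusForConditionalGeneration
#     # the line above is what finally triggers the deferred import of
#     # modeling_bigbird_pegasus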
class Node:
    """A binary search tree node holding a value and two children."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal: left subtree, node, right subtree
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build the BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse the BST in order to collect the sorted values
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
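    # Hedged sanity check: on duplicate-free input, tree_sort should agree
    # with the built-in sorted(); note that insert() silently ignores
    # duplicate values, so inputs with repeats would come back deduplicated.
    assert tree_sort([10, 1, 3, 2, 9, 14, 13]) == sorted([10, 1, 3, 2, 9, 14, 13])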
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
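# The three try/except blocks above make each backend optional: without
# `sentencepiece`, for example, `FNetTokenizer` is simply left out of the lazy
# structure instead of breaking the import. Hedged sketch of a defensive check:
#
#     from transformers.utils import is_sentencepiece_available
#     if is_sentencepiece_available():
#         from transformers import FNetTokenizer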