# Functions to print the upper and lower halves of a diamond (pyramid) of stars.


def floyd(n: int) -> None:
    """Print the upper half of the diamond (a pyramid)."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n: int) -> None:
    """Print the lower half of the diamond (an inverted pyramid)."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n: int) -> None:
    """Print the full diamond, or a placeholder message for non-positive n."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
import unittest

from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_detectron2,
    require_pytesseract,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image

    from transformers.image_utils import load_image
else:

    class Image:
        """Stub so references below do not fail when vision deps are missing."""

        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None


# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)


@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, image_processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=image_processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )

    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])

    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
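
# ----------------------------------------------------------------------
# Illustrative usage sketch, not part of the original test module: the same
# pipeline driven outside the test harness. The checkpoint and question are
# taken from the tests above; running this requires torch, Pillow, and
# pytesseract at runtime.
if __name__ == "__main__":
    dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    for candidate in dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=2):
        print(candidate["answer"], candidate["score"])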
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class a ( __snake_case ):
SCREAMING_SNAKE_CASE : Optional[Any] = """linear"""
SCREAMING_SNAKE_CASE : Union[str, Any] = """cosine"""
SCREAMING_SNAKE_CASE : Tuple = """cosine_with_restarts"""
SCREAMING_SNAKE_CASE : Tuple = """polynomial"""
SCREAMING_SNAKE_CASE : Tuple = """constant"""
SCREAMING_SNAKE_CASE : Dict = """constant_with_warmup"""
SCREAMING_SNAKE_CASE : Tuple = """piecewise_constant"""
def lowerCamelCase__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int = -1 ) -> Optional[Any]:
return LambdaLR(_lowerCamelCase , lambda _lowerCamelCase : 1 , last_epoch=_lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int = -1 ) -> int:
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1.0 , _lowerCamelCase ) )
return 1.0
return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : str , _lowerCamelCase : int = -1 ) -> Tuple:
lowerCamelCase_ = {}
lowerCamelCase_ = step_rules.split(',' )
for rule_str in rule_list[:-1]:
lowerCamelCase_ , lowerCamelCase_ = rule_str.split(':' )
lowerCamelCase_ = int(_lowerCamelCase )
lowerCamelCase_ = float(_lowerCamelCase )
lowerCamelCase_ = value
lowerCamelCase_ = float(rule_list[-1] )
def create_rules_function(_lowerCamelCase : Optional[int] , _lowerCamelCase : int ):
def rule_func(_lowerCamelCase : int ) -> float:
lowerCamelCase_ = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_lowerCamelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
lowerCamelCase_ = create_rules_function(_lowerCamelCase , _lowerCamelCase )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : str , _lowerCamelCase : Optional[Any]=-1 ) -> int:
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : float = 0.5 , _lowerCamelCase : int = -1 ) -> str:
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
lowerCamelCase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_lowerCamelCase ) * 2.0 * progress )) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int = 1 , _lowerCamelCase : int = -1 ) -> List[Any]:
def lr_lambda(_lowerCamelCase : Optional[Any] ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
lowerCamelCase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_lowerCamelCase ) * progress) % 1.0) )) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : str , _lowerCamelCase : Optional[Any]=1e-7 , _lowerCamelCase : str=1.0 , _lowerCamelCase : Any=-1 ) -> Dict:
lowerCamelCase_ = optimizer.defaults['lr']
if not (lr_init > lr_end):
raise ValueError(F'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''' )
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
lowerCamelCase_ = lr_init - lr_end
lowerCamelCase_ = num_training_steps - num_warmup_steps
lowerCamelCase_ = 1 - (current_step - num_warmup_steps) / decay_steps
lowerCamelCase_ = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCamelCase__ ( _lowerCamelCase : Union[str, SchedulerType] , _lowerCamelCase : Optimizer , _lowerCamelCase : Optional[str] = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : int = 1 , _lowerCamelCase : float = 1.0 , _lowerCamelCase : int = -1 , ) -> Dict:
lowerCamelCase_ = SchedulerType(_lowerCamelCase )
lowerCamelCase_ = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_lowerCamelCase , last_epoch=_lowerCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_lowerCamelCase , step_rules=_lowerCamelCase , last_epoch=_lowerCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_lowerCamelCase , num_warmup_steps=_lowerCamelCase , last_epoch=_lowerCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , num_cycles=_lowerCamelCase , last_epoch=_lowerCamelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , power=_lowerCamelCase , last_epoch=_lowerCamelCase , )
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , last_epoch=_lowerCamelCase )
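
# ----------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module: wiring
# `get_scheduler` into a training loop. The model, step counts, and learning
# rate below are made-up values for demonstration only.
if __name__ == "__main__":
    import torch

    model = torch.nn.Linear(4, 4)  # stand-in model
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
    lr_scheduler = get_scheduler(
        "cosine", optimizer=optimizer, num_warmup_steps=100, num_training_steps=1000
    )
    for _ in range(1000):
        optimizer.step()
        lr_scheduler.step()  # advance the schedule once per optimizer step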
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class a ( unittest.TestCase ):
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any]=7 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : Tuple=30 , __SCREAMING_SNAKE_CASE : str=400 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : int=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : List[Any]=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Dict=1 / 255 , __SCREAMING_SNAKE_CASE : Dict=True , ) -> Optional[int]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCamelCase_ = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = min_resolution
lowerCamelCase_ = max_resolution
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean
lowerCamelCase_ = image_std
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_factor
lowerCamelCase_ = do_pad
def UpperCamelCase ( self : List[Any] ) -> Dict:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any]=False ) -> str:
if not batched:
lowerCamelCase_ = image_inputs[0]
if isinstance(__SCREAMING_SNAKE_CASE , Image.Image ):
lowerCamelCase_ , lowerCamelCase_ = image.size
else:
lowerCamelCase_ , lowerCamelCase_ = image.shape[1], image.shape[2]
if w < h:
lowerCamelCase_ = int(self.size['shortest_edge'] * h / w )
lowerCamelCase_ = self.size['shortest_edge']
elif w > h:
lowerCamelCase_ = self.size['shortest_edge']
lowerCamelCase_ = int(self.size['shortest_edge'] * w / h )
else:
lowerCamelCase_ = self.size['shortest_edge']
lowerCamelCase_ = self.size['shortest_edge']
else:
lowerCamelCase_ = []
for image in image_inputs:
lowerCamelCase_ , lowerCamelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase_ = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[0] )[0]
lowerCamelCase_ = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class a ( __snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE : Any = DeformableDetrImageProcessor if is_vision_available() else None
def UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
lowerCamelCase_ = DeformableDetrImageProcessingTester(self )
@property
def UpperCamelCase ( self : Optional[int] ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self : int ) -> str:
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'image_std' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_rescale' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_pad' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'size' ) )
def UpperCamelCase ( self : Optional[int] ) -> int:
lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , __SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Optional[int] ) -> List[Any]:
pass
def UpperCamelCase ( self : Union[str, Any] ) -> str:
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self : str ) -> Any:
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self : Tuple ) -> str:
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCamelCase ( self : Optional[Any] ) -> str:
# prepare image and target
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {'image_id': 39769, 'annotations': target}
# encode them
lowerCamelCase_ = DeformableDetrImageProcessor()
lowerCamelCase_ = image_processing(images=__SCREAMING_SNAKE_CASE , annotations=__SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __SCREAMING_SNAKE_CASE ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __SCREAMING_SNAKE_CASE ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __SCREAMING_SNAKE_CASE ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __SCREAMING_SNAKE_CASE ) )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __SCREAMING_SNAKE_CASE ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __SCREAMING_SNAKE_CASE ) )
@slow
def UpperCamelCase ( self : Tuple ) -> str:
# prepare image, target and masks_path
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
lowerCamelCase_ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
lowerCamelCase_ = DeformableDetrImageProcessor(format='coco_panoptic' )
lowerCamelCase_ = image_processing(images=__SCREAMING_SNAKE_CASE , annotations=__SCREAMING_SNAKE_CASE , masks_path=__SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __SCREAMING_SNAKE_CASE ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __SCREAMING_SNAKE_CASE ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __SCREAMING_SNAKE_CASE ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __SCREAMING_SNAKE_CASE ) )
# verify masks
lowerCamelCase_ = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , __SCREAMING_SNAKE_CASE )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __SCREAMING_SNAKE_CASE ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __SCREAMING_SNAKE_CASE ) )
def harmonic_series(n_term: str) -> list:
    """Return the harmonic series terms "1", "1/2", ..., "1/n" as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int


logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None


def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)


class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)

    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
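
# ----------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module: this builder
# backs `Dataset.from_spark` in recent `datasets` releases. The DataFrame
# below is a made-up example; running it requires a working pyspark install.
if __name__ == "__main__":
    from pyspark.sql import SparkSession

    from datasets import Dataset

    spark = SparkSession.builder.master("local[*]").getOrCreate()
    df = spark.createDataFrame([(1, "a"), (2, "b")], "id long, text string")
    ds = Dataset.from_spark(df)  # materializes the DataFrame into an Arrow-backed Dataset
    print(ds[0])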
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index in `s` where `pattern` begins, using naive scanning."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
"""Deformable DETR model configuration."""

import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
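
# ----------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module: building a
# config and reading mapped attributes. The field values are arbitrary
# examples, not recommended settings.
if __name__ == "__main__":
    config = DeformableDetrConfig(num_queries=100, two_stage=True, with_box_refine=True)
    print(config.hidden_size)  # resolves to d_model (256) via attribute_map
    print(config.to_dict()["model_type"])  # "deformable_detr"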
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        """Tokenize a string into a list of sentencepiece tokens."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
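
# ----------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module: round-tripping
# text through the tokenizer. Assumes the checkpoint referenced in
# PRETRAINED_VOCAB_FILES_MAP above is reachable and sentencepiece is installed.
if __name__ == "__main__":
    tokenizer = BertGenerationTokenizer.from_pretrained(
        "google/bert_for_seq_generation_L-24_bbc_encoder"
    )
    ids = tokenizer.encode("Hello world")
    print(tokenizer.convert_ids_to_tokens(ids))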
from __future__ import annotations
UpperCamelCase = tuple[int, int, int]
UpperCamelCase = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
UpperCamelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
UpperCamelCase = "EGZWVONAHDCLFQMSIPJBYUKXTR"
UpperCamelCase = "FOBHMDKEXQNRAULPGSJVTYICZW"
UpperCamelCase = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
UpperCamelCase = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
UpperCamelCase = "RMDJXFUWGISLHVTCQNKYPBEZOA"
UpperCamelCase = "SGLCPQWZHKXAREONTFBVIYJUDM"
UpperCamelCase = "HVSICLTYKQUBXDWAJZOMFGPREN"
UpperCamelCase = "RZWQHFMVDBKICJLNTUXAGYPSOE"
UpperCamelCase = "LFKIJODBEGAMQPXVUHYSTCZRWN"
UpperCamelCase = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
# Checks if there are 3 unique rotors
if (unique_rotsel := len(set(SCREAMING_SNAKE_CASE ) )) < 3:
_lowercase : Optional[int] = F"""Please use 3 unique rotors (not {unique_rotsel})"""
raise Exception(SCREAMING_SNAKE_CASE )
# Checks if rotor positions are valid
_lowercase , _lowercase , _lowercase : int = rotpos
if not 0 < rotorposa <= len(SCREAMING_SNAKE_CASE ):
_lowercase : Dict = F"""First rotor position is not within range of 1..26 ({rotorposa}"""
raise ValueError(SCREAMING_SNAKE_CASE )
if not 0 < rotorposa <= len(SCREAMING_SNAKE_CASE ):
_lowercase : int = F"""Second rotor position is not within range of 1..26 ({rotorposa})"""
raise ValueError(SCREAMING_SNAKE_CASE )
if not 0 < rotorposa <= len(SCREAMING_SNAKE_CASE ):
_lowercase : str = F"""Third rotor position is not within range of 1..26 ({rotorposa})"""
raise ValueError(SCREAMING_SNAKE_CASE )
# Validates string and returns dict
_lowercase : Tuple = _plugboard(SCREAMING_SNAKE_CASE )
return rotpos, rotsel, pbdict
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> dict[str, str]:
# tests the input string if it
# a) is type string
# b) has even length (so pairs can be made)
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : Optional[int] = F"""Plugboard setting isn't type string ({type(SCREAMING_SNAKE_CASE )})"""
raise TypeError(SCREAMING_SNAKE_CASE )
elif len(SCREAMING_SNAKE_CASE ) % 2 != 0:
_lowercase : Optional[int] = F"""Odd number of symbols ({len(SCREAMING_SNAKE_CASE )})"""
raise Exception(SCREAMING_SNAKE_CASE )
elif pbstring == "":
return {}
pbstring.replace(' ' , '' )
# Checks if all characters are unique
_lowercase : Dict = set()
for i in pbstring:
if i not in abc:
_lowercase : str = F"""'{i}' not in list of symbols"""
raise Exception(SCREAMING_SNAKE_CASE )
elif i in tmppbl:
_lowercase : int = F"""Duplicate symbol ({i})"""
raise Exception(SCREAMING_SNAKE_CASE )
else:
tmppbl.add(SCREAMING_SNAKE_CASE )
del tmppbl
# Created the dictionary
_lowercase : Optional[Any] = {}
for j in range(0 , len(SCREAMING_SNAKE_CASE ) - 1 , 2 ):
_lowercase : Dict = pbstring[j + 1]
_lowercase : Union[str, Any] = pbstring[j]
return pb
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (rotora, rotora, rotora) , SCREAMING_SNAKE_CASE = "" , ) -> str:
_lowercase : List[str] = text.upper()
_lowercase , _lowercase , _lowercase : List[str] = _validator(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , plugb.upper() )
_lowercase , _lowercase , _lowercase : Optional[int] = rotor_position
_lowercase , _lowercase , _lowercase : Union[str, Any] = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
_lowercase : Optional[int] = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
_lowercase : Dict = plugboard[symbol]
# rotor ra --------------------------
_lowercase : Optional[Any] = abc.index(SCREAMING_SNAKE_CASE ) + rotorposa
_lowercase : Union[str, Any] = rotora[index % len(SCREAMING_SNAKE_CASE )]
# rotor rb --------------------------
_lowercase : Tuple = abc.index(SCREAMING_SNAKE_CASE ) + rotorposa
_lowercase : str = rotora[index % len(SCREAMING_SNAKE_CASE )]
# rotor rc --------------------------
_lowercase : List[Any] = abc.index(SCREAMING_SNAKE_CASE ) + rotorposa
_lowercase : List[str] = rotora[index % len(SCREAMING_SNAKE_CASE )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
_lowercase : List[str] = reflector[symbol]
# 2nd rotors
_lowercase : List[str] = abc[rotora.index(SCREAMING_SNAKE_CASE ) - rotorposa]
_lowercase : Tuple = abc[rotora.index(SCREAMING_SNAKE_CASE ) - rotorposa]
_lowercase : Dict = abc[rotora.index(SCREAMING_SNAKE_CASE ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
_lowercase : int = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(SCREAMING_SNAKE_CASE ):
_lowercase : Any = 0
rotorposa += 1
if rotorposa >= len(SCREAMING_SNAKE_CASE ):
_lowercase : int = 0
rotorposa += 1
if rotorposa >= len(SCREAMING_SNAKE_CASE ):
_lowercase : Any = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(SCREAMING_SNAKE_CASE )
return "".join(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCamelCase = "This is my Python script that emulates the Enigma machine from WWII."
UpperCamelCase = (1, 1, 1)
UpperCamelCase = "pictures"
UpperCamelCase = (rotora, rotora, rotora)
UpperCamelCase = enigma(message, rotor_pos, rotor_sel, pb)
print("Encrypted message:", en)
print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 66
| 1
|
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
_lowercase : Dict = True
except ImportError:
_lowercase : Any = False
_lowercase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowerCamelCase ( UpperCAmelCase__ : Namespace ) -> List[str]:
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class __magic_name__ ( _UpperCAmelCase):
@staticmethod
def SCREAMING_SNAKE_CASE_ ( lowercase_ : ArgumentParser ):
lowercase_ : int = parser.add_parser("""add-new-model""" )
add_new_model_parser.add_argument("""--testing""" , action="""store_true""" , help="""If in testing mode.""" )
add_new_model_parser.add_argument("""--testing_file""" , type=lowercase_ , help="""Configuration file on which to run.""" )
add_new_model_parser.add_argument(
"""--path""" , type=lowercase_ , help="""Path to cookiecutter. Should only be used for testing purposes.""" )
add_new_model_parser.set_defaults(func=lowercase_ )
def __init__( self : Optional[int] , lowercase_ : bool , lowercase_ : str , lowercase_ : Union[str, Any]=None , *lowercase_ : Dict ):
lowercase_ : Union[str, Any] = testing
lowercase_ : List[Any] = testing_file
lowercase_ : int = path
def SCREAMING_SNAKE_CASE_ ( self : Any ):
warnings.warn(
"""The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. """
"""It is not actively maintained anymore, so might give a result that won't pass all tests and quality """
"""checks, you should use `transformers-cli add-new-model-like` instead.""" )
if not _has_cookiecutter:
raise ImportError(
"""Model creation dependencies are required to use the `add_new_model` command. Install them by running """
"""the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n""" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
lowercase_ : List[Any] = [directory for directory in os.listdir() if """cookiecutter-template-""" == directory[:22]]
if len(lowercase_ ) > 0:
raise ValueError(
"""Several directories starting with `cookiecutter-template-` in current working directory. """
"""Please clean your directory by removing all folders starting with `cookiecutter-template-` or """
"""change your working directory.""" )
lowercase_ : str = (
Path(lowercase_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
lowercase_ : List[Any] = path_to_transformer_root / """templates""" / """adding_a_new_model"""
# Execute cookiecutter
if not self._testing:
cookiecutter(str(lowercase_ ) )
else:
with open(self._testing_file , """r""" ) as configuration_file:
lowercase_ : Union[str, Any] = json.load(lowercase_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=lowercase_ , extra_context=lowercase_ , )
lowercase_ : List[Any] = [directory for directory in os.listdir() if """cookiecutter-template-""" in directory[:22]][0]
# Retrieve configuration
with open(directory + """/configuration.json""" , """r""" ) as configuration_file:
lowercase_ : Optional[int] = json.load(lowercase_ )
lowercase_ : List[Any] = configuration["""lowercase_modelname"""]
lowercase_ : List[Any] = configuration["""generate_tensorflow_pytorch_and_flax"""]
os.remove(f'''{directory}/configuration.json''' )
lowercase_ : Any = """PyTorch""" in generate_tensorflow_pytorch_and_flax
lowercase_ : Tuple = """TensorFlow""" in generate_tensorflow_pytorch_and_flax
lowercase_ : Any = """Flax""" in generate_tensorflow_pytorch_and_flax
lowercase_ : List[str] = f'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
os.makedirs(lowercase_ , exist_ok=lowercase_ )
os.makedirs(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=lowercase_ )
# Tests require submodules as they have parent imports
with open(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , """w""" ):
pass
shutil.move(
f'''{directory}/__init__.py''' , f'''{model_dir}/__init__.py''' , )
shutil.move(
f'''{directory}/configuration_{lowercase_model_name}.py''' , f'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
def remove_copy_lines(lowercase_ : int ):
with open(lowercase_ , """r""" ) as f:
lowercase_ : Union[str, Any] = f.readlines()
with open(lowercase_ , """w""" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(lowercase_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_tf_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_flax_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/{lowercase_model_name}.md''' , f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
f'''{directory}/tokenization_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(lowercase_ : str , lowercase_ : str , lowercase_ : List[str] ):
# Create temp file
lowercase_ , lowercase_ : Any = mkstemp()
lowercase_ : List[str] = False
with fdopen(lowercase_ , """w""" ) as new_file:
with open(lowercase_ ) as old_file:
for line in old_file:
new_file.write(lowercase_ )
if line_to_copy_below in line:
lowercase_ : int = True
for line_to_copy in lines_to_copy:
new_file.write(lowercase_ )
if not line_found:
raise ValueError(f'''Line {line_to_copy_below} was not found in file.''' )
# Copy the file permissions from the old file to the new file
copymode(lowercase_ , lowercase_ )
# Remove original file
remove(lowercase_ )
# Move new file
move(lowercase_ , lowercase_ )
def skip_units(lowercase_ : str ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(lowercase_ : int ):
with open(lowercase_ ) as datafile:
lowercase_ : Optional[int] = []
lowercase_ : List[Any] = False
lowercase_ : Dict = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
lowercase_ : Optional[Any] = line.split("""\"""" )[1]
lowercase_ : Optional[int] = skip_units(lowercase_ )
elif "# Below: " in line and "##" not in line:
lowercase_ : List[str] = line.split("""\"""" )[1]
lowercase_ : Union[str, Any] = skip_units(lowercase_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(lowercase_ , lowercase_ , lowercase_ )
lowercase_ : int = []
elif "# Replace with" in line and "##" not in line:
lowercase_ : Optional[int] = []
elif "##" not in line:
lines_to_copy.append(lowercase_ )
remove(lowercase_ )
replace_in_files(f'''{directory}/to_replace_{lowercase_model_name}.py''' )
os.rmdir(lowercase_ )
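The nested `replace` helper above uses the standard safe in-place edit pattern: write the edited content to a temporary file, copy the original file's permissions onto it, remove the original, then move the temporary file into place, so a crash mid-write never corrupts the source. A minimal self-contained sketch of the same pattern (the file name and marker below are made up for illustration):
import os
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp

def insert_below(path: str, marker: str, new_lines: list) -> None:
    # Write the edited content to a temp file first.
    fd, tmp_path = mkstemp()
    with fdopen(fd, "w") as new_file, open(path) as old_file:
        for line in old_file:
            new_file.write(line)
            if marker in line:
                new_file.writelines(new_lines)
    copymode(path, tmp_path)  # keep the original permissions
    remove(path)
    move(tmp_path, path)

with open("demo.txt", "w") as f:
    f.write("alpha\n# anchor\nomega\n")
insert_below("demo.txt", "# anchor", ["inserted\n"])
print(open("demo.txt").read())
os.remove("demo.txt")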
| 30
|
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Tuple = sin(UpperCAmelCase__ )
lowercase_ : int = cos(UpperCAmelCase__ )
lowercase_ : Any = _sin / (2 * q_factor)
lowercase_ : Dict = (1 - _cos) / 2
lowercase_ : Optional[int] = 1 - _cos
lowercase_ : Dict = 1 + alpha
lowercase_ : List[Any] = -2 * _cos
lowercase_ : Union[str, Any] = 1 - alpha
lowercase_ : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Optional[int] = sin(UpperCAmelCase__ )
lowercase_ : Dict = cos(UpperCAmelCase__ )
lowercase_ : Optional[int] = _sin / (2 * q_factor)
lowercase_ : Dict = (1 + _cos) / 2
lowercase_ : str = -1 - _cos
lowercase_ : Dict = 1 + alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : List[Any] = 1 - alpha
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : int = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : Union[str, Any] = cos(UpperCAmelCase__ )
lowercase_ : str = _sin / (2 * q_factor)
lowercase_ : str = _sin / 2
lowercase_ : Any = 0
lowercase_ : Optional[Any] = -ba
lowercase_ : Dict = 1 + alpha
lowercase_ : Union[str, Any] = -2 * _cos
lowercase_ : Union[str, Any] = 1 - alpha
lowercase_ : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : List[str] = tau * frequency / samplerate
lowercase_ : Any = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : Optional[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 1 - alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : Optional[int] = 1 + alpha
lowercase_ : Dict = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Tuple = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : List[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : List[str] = 1 + alpha * big_a
lowercase_ : List[Any] = -2 * _cos
lowercase_ : Dict = 1 - alpha * big_a
lowercase_ : str = 1 + alpha / big_a
lowercase_ : List[str] = -2 * _cos
lowercase_ : Tuple = 1 - alpha / big_a
lowercase_ : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Union[str, Any] = sin(UpperCAmelCase__ )
lowercase_ : Any = cos(UpperCAmelCase__ )
lowercase_ : Any = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : Any = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : int = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Tuple = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : Optional[Any] = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : int = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (pmc + aaa)
lowercase_ : List[str] = 2 * big_a * mpc
lowercase_ : Union[str, Any] = big_a * (pmc - aaa)
lowercase_ : Optional[int] = ppmc + aaa
lowercase_ : Optional[int] = -2 * pmpc
lowercase_ : Any = ppmc - aaa
lowercase_ : Optional[int] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : int = cos(UpperCAmelCase__ )
lowercase_ : Dict = _sin / (2 * q_factor)
lowercase_ : Union[str, Any] = 10 ** (gain_db / 40)
lowercase_ : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : Optional[int] = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Any = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : str = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : Optional[int] = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (ppmc + aaa)
lowercase_ : List[Any] = -2 * big_a * pmpc
lowercase_ : Optional[Any] = big_a * (ppmc - aaa)
lowercase_ : Optional[Any] = pmc + aaa
lowercase_ : int = 2 * mpc
lowercase_ : Tuple = pmc - aaa
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
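Every builder in this file computes the classic RBJ Audio EQ Cookbook biquad coefficients and loads them into a second-order IIR filter, which then evaluates the difference equation y[n] = (b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]) / a0. A self-contained sketch of the low-pass case with the coefficient names spelled out; the plain `biquad` function below stands in for the IIRFilter class:
from math import cos, sin, sqrt, tau

def lowpass_coefficients(frequency: int, samplerate: int, q: float = 1 / sqrt(2)):
    # Same math as the low-pass builder above, with explicit a/b names.
    w0 = tau * frequency / samplerate
    alpha = sin(w0) / (2 * q)
    b0 = (1 - cos(w0)) / 2
    b1 = 1 - cos(w0)
    b2 = b0
    a0 = 1 + alpha
    a1 = -2 * cos(w0)
    a2 = 1 - alpha
    return (a0, a1, a2), (b0, b1, b2)

def biquad(samples, a, b):
    # Direct-form-I evaluation of the difference equation.
    a0, a1, a2 = a
    b0, b1, b2 = b
    x1 = x2 = y1 = y2 = 0.0
    out = []
    for x0 in samples:
        y0 = (b0 * x0 + b1 * x1 + b2 * x2 - a1 * y1 - a2 * y2) / a0
        x2, x1, y2, y1 = x1, x0, y1, y0
        out.append(y0)
    return out

a, b = lowpass_coefficients(1_000, 48_000)
print(biquad([1.0, 0.0, 0.0, 0.0], a, b))  # first four samples of the impulse response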
| 30
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
_lowerCAmelCase = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
_lowerCAmelCase = {
'''RUCAIBox/mvp''': 1024,
}
class _SCREAMING_SNAKE_CASE ( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE :List[str] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE :str = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE :Optional[int] = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE :List[Any] = MvpTokenizer
def __init__( self : Tuple , a__ : List[str]=None , a__ : Optional[Any]=None , a__ : List[Any]=None , a__ : Optional[int]="replace" , a__ : List[Any]="<s>" , a__ : Optional[Any]="</s>" , a__ : int="</s>" , a__ : Any="<s>" , a__ : Union[str, Any]="<unk>" , a__ : Optional[Any]="<pad>" , a__ : Any="<mask>" , a__ : Union[str, Any]=False , a__ : Dict=True , **a__ : Optional[Any] , ):
super().__init__(
__A , __A , tokenizer_file=__A , errors=__A , bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , unk_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , trim_offsets=__A , **__A , )
__magic_name__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __A ) != add_prefix_space:
__magic_name__ = getattr(__A , pre_tok_state.pop('''type''' ) )
__magic_name__ = add_prefix_space
__magic_name__ = pre_tok_class(**__A )
__magic_name__ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__magic_name__ = '''post_processor'''
__magic_name__ = getattr(self.backend_tokenizer , __A , __A )
if tokenizer_component_instance:
__magic_name__ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__magic_name__ = tuple(state['''sep'''] )
if "cls" in state:
__magic_name__ = tuple(state['''cls'''] )
__magic_name__ = False
if state.get('''add_prefix_space''' , __A ) != add_prefix_space:
__magic_name__ = add_prefix_space
__magic_name__ = True
if state.get('''trim_offsets''' , __A ) != trim_offsets:
__magic_name__ = trim_offsets
__magic_name__ = True
if changes_to_apply:
__magic_name__ = getattr(__A , state.pop('''type''' ) )
__magic_name__ = component_class(**__A )
setattr(self.backend_tokenizer , __A , __A )
@property
def snake_case__ ( self : int ):
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def snake_case__ ( self : List[str] , a__ : Union[str, Any] ):
__magic_name__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else value
__magic_name__ = value
def snake_case__ ( self : List[str] , *a__ : List[Any] , **a__ : Union[str, Any] ):
__magic_name__ = kwargs.get('''is_split_into_words''' , __A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__A , **__A )
def snake_case__ ( self : Tuple , *a__ : Union[str, Any] , **a__ : List[Any] ):
__magic_name__ = kwargs.get('''is_split_into_words''' , __A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__A , **__A )
def snake_case__ ( self : int , a__ : str , a__ : Optional[str] = None ):
__magic_name__ = self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
def snake_case__ ( self : Dict , a__ : str , a__ : Optional[int]=None ):
__magic_name__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def snake_case__ ( self : str , a__ : List[int] , a__ : Optional[List[int]] = None ):
__magic_name__ = [self.sep_token_id]
__magic_name__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
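The last two methods implement the RoBERTa/BART special-token layout: a single sequence becomes `<s> A </s>`, a pair becomes `<s> A </s> </s> B </s>`, and every position gets token type id 0. A small sketch of that logic with made-up token ids (0 for `<s>`, 2 for `</s>`):
BOS, EOS = 0, 2  # hypothetical ids for <s> and </s>

def build_inputs(ids_a, ids_b=None):
    out = [BOS] + ids_a + [EOS]
    if ids_b is None:
        return out
    return out + [EOS] + ids_b + [EOS]

def token_type_ids(ids_a, ids_b=None):
    # MVP, like BART, uses all zeros regardless of the pair.
    return [0] * len(build_inputs(ids_a, ids_b))

print(build_inputs([10, 11]))          # [0, 10, 11, 2]
print(build_inputs([10, 11], [20]))    # [0, 10, 11, 2, 2, 20, 2]
print(token_type_ids([10, 11], [20]))  # seven zeros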
| 432
|
'''simple docstring'''
from __future__ import annotations
from random import random
class snake_case :
"""simple docstring"""
def __init__( self : Tuple , __A : int | None = None ):
__UpperCamelCase = value
__UpperCamelCase = random()
__UpperCamelCase = None
__UpperCamelCase = None
def __repr__( self : List[str] ):
from pprint import pformat
if self.left is None and self.right is None:
return f'''\'{self.value}: {self.prior:.5}\''''
else:
return pformat(
{f'''{self.value}: {self.prior:.5}''': (self.left, self.right)} , indent=1 )
def __str__( self : List[str] ):
__UpperCamelCase = str(self.value ) + ' '
__UpperCamelCase = str(self.left or '' )
__UpperCamelCase = str(self.right or '' )
return value + left + right
def lowercase__ ( __lowercase : Node | None , __lowercase : int ) -> tuple[Node | None, Node | None]:
"""simple docstring"""
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
__UpperCamelCase , __UpperCamelCase = split(root.left , __lowercase )
return left, root
else:
__UpperCamelCase , __UpperCamelCase = split(root.right , __lowercase )
return root, right
def lowercase__ ( __lowercase : Node | None , __lowercase : Node | None ) -> Node | None:
"""simple docstring"""
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
__UpperCamelCase = merge(left.right , __lowercase )
return left
else:
__UpperCamelCase = merge(__lowercase , right.left )
return right
def lowercase__ ( __lowercase : Node | None , __lowercase : int ) -> Node | None:
"""simple docstring"""
__UpperCamelCase = Node(__lowercase )
__UpperCamelCase , __UpperCamelCase = split(__lowercase , __lowercase )
return merge(merge(__lowercase , __lowercase ) , __lowercase )
def lowercase__ ( __lowercase : Node | None , __lowercase : int ) -> Node | None:
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase = split(__lowercase , value - 1 )
__UpperCamelCase , __UpperCamelCase = split(__lowercase , __lowercase )
return merge(__lowercase , __lowercase )
def lowercase__ ( __lowercase : Node | None ) -> None:
"""simple docstring"""
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=',' )
inorder(root.right )
def lowercase__ ( __lowercase : Node | None , __lowercase : str ) -> Node | None:
"""simple docstring"""
for arg in args.split():
if arg[0] == "+":
__UpperCamelCase = insert(__lowercase , int(arg[1:] ) )
elif arg[0] == "-":
__UpperCamelCase = erase(__lowercase , int(arg[1:] ) )
else:
print('Unknown command' )
return root
def lowercase__ ( ) -> None:
"""simple docstring"""
__UpperCamelCase = None
print(
'enter numbers to create a tree, + value to add value into treap, '
'- value to erase all nodes with value. \'q\' to quit. ' )
__UpperCamelCase = input()
while args != "q":
__UpperCamelCase = interact_treap(__lowercase , __lowercase )
print(__lowercase )
__UpperCamelCase = input()
print('goodbye!' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
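Insertion above is a split plus two merges: `split` preserves the BST order on values while `merge` preserves the min-heap order on the random priorities, so an inorder walk of the treap is always sorted. A compact, self-contained property check of that invariant:
from random import randint, random, seed

class _Node:
    def __init__(self, value):
        self.value, self.prior = value, random()
        self.left = self.right = None

def _split(root, value):
    # Left part holds values <= value, right part holds values > value.
    if root is None:
        return None, None
    if value < root.value:
        left, root.left = _split(root.left, value)
        return left, root
    root.right, right = _split(root.right, value)
    return root, right

def _merge(left, right):
    # The node with the smaller priority stays on top.
    if not left or not right:
        return left or right
    if left.prior < right.prior:
        left.right = _merge(left.right, right)
        return left
    right.left = _merge(left, right.left)
    return right

def _insert(root, value):
    left, right = _split(root, value)
    return _merge(_merge(left, _Node(value)), right)

def _inorder(root, out):
    if root:
        _inorder(root.left, out)
        out.append(root.value)
        _inorder(root.right, out)
    return out

seed(0)
values = [randint(0, 99) for _ in range(200)]
root = None
for v in values:
    root = _insert(root, v)
assert _inorder(root, []) == sorted(values)  # BST order survives random inserts
print("inorder after 200 random inserts is sorted")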
| 399
| 0
|
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class lowercase ( unittest.TestCase):
def a_ ( self : Tuple ):
"""simple docstring"""
A_ : int = '''hf-internal-testing/tiny-random-t5'''
A_ : List[Any] = AutoTokenizer.from_pretrained(_lowerCamelCase )
A_ : Dict = AutoModelForSeqaSeqLM.from_pretrained(_lowerCamelCase )
A_ : Tuple = tokenizer('''This is me''' , return_tensors='''pt''' )
A_ : Union[str, Any] = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
A_ : Optional[int] = model.generate(**_lowerCamelCase )
A_ : Optional[int] = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCamelCase )
A_ : Dict = AutoModelForSeqaSeqLM.from_pretrained(_lowerCamelCase )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
A_ : Union[str, Any] = model_reloaded.generate(**_lowerCamelCase )
self.assertTrue(torch.allclose(_lowerCamelCase , _lowerCamelCase ) )
def a_ ( self : Tuple ):
"""simple docstring"""
A_ : List[str] = '''hf-internal-testing/tiny-random-t5'''
A_ : Any = AutoModelForSeqaSeqLM.from_pretrained(_lowerCamelCase )
A_ : Dict = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(_lowerCamelCase ):
model.save_pretrained(_lowerCamelCase )
A_ : List[str] = model.reverse_bettertransformer()
model.save_pretrained(_lowerCamelCase )
| 361
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_lowerCamelCase : Optional[Any] = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class lowercase ( unittest.TestCase):
__lowerCAmelCase : Optional[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__lowerCAmelCase : str = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__lowerCAmelCase : int = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__lowerCAmelCase : List[Any] = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def a_ ( self : Optional[Any] , _lowerCamelCase : Any , _lowerCamelCase : List[Any] , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : int = ZeroShotClassificationPipeline(
model=_lowerCamelCase , tokenizer=_lowerCamelCase , candidate_labels=['''politics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def a_ ( self : List[str] , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ : List[Any] = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
self.assertEqual(_lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase )]} )
# No kwarg
A_ : Tuple = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
self.assertEqual(_lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase )]} )
A_ : List[Any] = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
self.assertEqual(_lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase )]} )
A_ : Union[str, Any] = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
self.assertEqual(
_lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
A_ : Tuple = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
self.assertEqual(
_lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
A_ : List[str] = classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
self.assertEqual(_lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase )]} )
# https://github.com/huggingface/transformers/issues/13846
A_ : str = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
self.assertEqual(
_lowerCamelCase , [
{'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )]}
for i in range(1 )
] , )
A_ : str = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
self.assertEqual(
_lowerCamelCase , [
{'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )]}
for i in range(2 )
] , )
with self.assertRaises(_lowerCamelCase ):
classifier('''''' , candidate_labels='''politics''' )
with self.assertRaises(_lowerCamelCase ):
classifier(_lowerCamelCase , candidate_labels='''politics''' )
with self.assertRaises(_lowerCamelCase ):
classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
with self.assertRaises(_lowerCamelCase ):
classifier('''Who are you voting for in 2020?''' , candidate_labels=_lowerCamelCase )
with self.assertRaises(_lowerCamelCase ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
with self.assertRaises(_lowerCamelCase ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=_lowerCamelCase , )
self.run_entailment_id(_lowerCamelCase )
def a_ ( self : Any , _lowerCamelCase : Pipeline ):
"""simple docstring"""
A_ : int = zero_shot_classifier.model.config
A_ : Dict = config.labelaid
A_ : Optional[int] = zero_shot_classifier.entailment_id
A_ : Optional[Any] = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
A_ : Union[str, Any] = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
A_ : int = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
A_ : Optional[Any] = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
A_ : List[Any] = original_labelaid
self.assertEqual(_lowerCamelCase , zero_shot_classifier.entailment_id )
@require_torch
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ : List[str] = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 1_00 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
def a_ ( self : Dict ):
"""simple docstring"""
A_ : Optional[Any] = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
A_ : Optional[Any] = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@require_tf
def a_ ( self : Dict ):
"""simple docstring"""
A_ : List[str] = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
A_ : List[str] = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def a_ ( self : int ):
"""simple docstring"""
A_ : Union[str, Any] = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
A_ : str = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
A_ : str = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_lowerCamelCase , )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def a_ ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Tuple = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
A_ : str = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
A_ : Tuple = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_lowerCamelCase , )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
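Behind the pipeline, every candidate label is scored by the NLI model on a premise/hypothesis pair, and the entailment logits are then normalized: with the default `multi_label=False`, the entailment logit is softmaxed across labels (so the scores sum to 1, as the tests above assert), while `multi_label=True` softmaxes entailment against contradiction per label. A numpy sketch of that post-processing with made-up logits; the real pipeline reads the entailment index from the model config, which `run_entailment_id` exercises above:
import numpy as np

# Hypothetical per-label logits [contradiction, neutral, entailment]
# for three candidate labels.
logits = np.array([
    [0.1, 0.2, 2.5],
    [1.5, 0.3, 0.4],
    [0.8, 0.9, 0.2],
])
contra_id, entail_id = 0, 2

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

# multi_label=False: one distribution over all candidate labels
single = softmax(logits[:, entail_id], axis=0)
print(single, single.sum())  # sums to 1.0

# multi_label=True: entailment vs. contradiction, independently per label
pair = softmax(logits[:, [contra_id, entail_id]], axis=1)[:, 1]
print(pair)  # independent scores in (0, 1)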
| 361
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
__magic_name__ = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
__magic_name__ = {
"facebook/bart-base": 1_024,
"facebook/bart-large": 1_024,
"facebook/bart-large-mnli": 1_024,
"facebook/bart-large-cnn": 1_024,
"facebook/bart-large-xsum": 1_024,
"yjernite/bart_eli5": 1_024,
}
class SCREAMING_SNAKE_CASE ( _UpperCAmelCase ):
"""simple docstring"""
a_ : Optional[Any] =VOCAB_FILES_NAMES
a_ : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
a_ : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : List[Any] =["""input_ids""", """attention_mask"""]
a_ : List[Any] =BartTokenizer
def __init__( self : Union[str, Any] , _snake_case : str=None , _snake_case : Union[str, Any]=None , _snake_case : List[str]=None , _snake_case : Dict="replace" , _snake_case : Optional[int]="<s>" , _snake_case : Optional[int]="</s>" , _snake_case : str="</s>" , _snake_case : int="<s>" , _snake_case : Any="<unk>" , _snake_case : Tuple="<pad>" , _snake_case : int="<mask>" , _snake_case : List[str]=False , _snake_case : Optional[Any]=True , **_snake_case : int , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
lowercase__ , lowercase__ , tokenizer_file=lowercase__ , errors=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ , **lowercase__ , )
a__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowercase__ ) != add_prefix_space:
a__ = getattr(lowercase__ , pre_tok_state.pop('type' ) )
a__ = add_prefix_space
a__ = pre_tok_class(**lowercase__ )
a__ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
a__ = """post_processor"""
a__ = getattr(self.backend_tokenizer , lowercase__ , lowercase__ )
if tokenizer_component_instance:
a__ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
a__ = tuple(state['sep'] )
if "cls" in state:
a__ = tuple(state['cls'] )
a__ = False
if state.get('add_prefix_space' , lowercase__ ) != add_prefix_space:
a__ = add_prefix_space
a__ = True
if state.get('trim_offsets' , lowercase__ ) != trim_offsets:
a__ = trim_offsets
a__ = True
if changes_to_apply:
a__ = getattr(lowercase__ , state.pop('type' ) )
a__ = component_class(**lowercase__ )
setattr(self.backend_tokenizer , lowercase__ , lowercase__ )
@property
def _lowerCAmelCase ( self : Tuple ) -> int:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def _lowerCAmelCase ( self : List[Any] , _snake_case : Union[str, Any] ) -> str:
'''simple docstring'''
a__ = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else value
a__ = value
def _lowerCAmelCase ( self : Any , *_snake_case : Any , **_snake_case : str ) -> int:
'''simple docstring'''
a__ = kwargs.get('is_split_into_words' , lowercase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*lowercase__ , **lowercase__ )
def _lowerCAmelCase ( self : List[str] , *_snake_case : Dict , **_snake_case : Optional[Any] ) -> List[str]:
'''simple docstring'''
a__ = kwargs.get('is_split_into_words' , lowercase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._encode_plus(*lowercase__ , **lowercase__ )
def _lowerCAmelCase ( self : Union[str, Any] , _snake_case : Optional[int] , _snake_case : Any = None ) -> Dict:
'''simple docstring'''
a__ = self._tokenizer.model.save(lowercase__ , name=lowercase__ )
return tuple(lowercase__ )
def _lowerCAmelCase ( self : Union[str, Any] , _snake_case : List[Any] , _snake_case : Any=None ) -> Optional[int]:
'''simple docstring'''
a__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowerCAmelCase ( self : List[Any] , _snake_case : List[str] , _snake_case : Optional[Any] = None ) -> List[str]:
'''simple docstring'''
a__ = [self.sep_token_id]
a__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 232
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int = 2_0_0_0_0_0_0 ):
"""simple docstring"""
snake_case_ : Optional[Any] = [0 for i in range(n + 1 )]
snake_case_ : int = 1
snake_case_ : str = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , SCREAMING_SNAKE_CASE__ ):
snake_case_ : Optional[int] = 1
snake_case_ : Any = 0
for i in range(SCREAMING_SNAKE_CASE__ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(F'''{solution() = }''')
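`solution` is a sieve of Eratosthenes over [0, n): a 1 marks a composite, multiples of each surviving i are struck starting at i*i, and the indices still marked 0 below n are summed. A self-contained boolean re-implementation, cross-checked against trial division for small limits:
def sieve_sum(n: int) -> int:
    composite = [False] * (n + 1)
    composite[0] = composite[1] = True
    for i in range(2, int(n**0.5) + 1):
        if not composite[i]:
            for j in range(i * i, n + 1, i):
                composite[j] = True
    return sum(i for i in range(n) if not composite[i])  # primes strictly below n

def brute_sum(n: int) -> int:
    def is_prime(k):
        return k > 1 and all(k % d for d in range(2, int(k**0.5) + 1))
    return sum(k for k in range(n) if is_prime(k))

assert sieve_sum(10) == brute_sum(10) == 17  # 2 + 3 + 5 + 7
assert sieve_sum(1_000) == brute_sum(1_000)
print(sieve_sum(2_000_000))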
| 480
| 0
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_A : List[str] = logging.getLogger()
def __lowerCAmelCase ( ) -> int:
__lowerCamelCase: List[str] = argparse.ArgumentParser()
parser.add_argument("""-f""" )
__lowerCamelCase: str = parser.parse_args()
return args.f
class a ( UpperCamelCase_ ):
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
__lowerCamelCase: Optional[int] = logging.StreamHandler(sys.stdout )
logger.addHandler(_a )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : Dict ):
__lowerCamelCase: List[str] = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , """run_glue_deebert.py""" )
with patch.object(_a , """argv""" , _a ):
__lowerCamelCase: Dict = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(_a , 0.666 )
@slow
@require_torch_non_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : int ):
__lowerCamelCase: Optional[Any] = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
self.run_and_check(_a )
__lowerCamelCase: int = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_a )
__lowerCamelCase: List[str] = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_a )
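The harness drives the training script in-process by swapping `sys.argv` with `unittest.mock.patch.object` before calling its `main()`, which is how argparse-based CLIs can be tested without spawning subprocesses. A minimal sketch of the pattern; the `main` below is a stand-in, not the real `run_glue_deebert.main`:
import argparse
import sys
from unittest.mock import patch

def main() -> float:
    # Stand-in for a script entry point that reads sys.argv via argparse.
    parser = argparse.ArgumentParser()
    parser.add_argument("--learning_rate", type=float, required=True)
    args = parser.parse_args()
    return args.learning_rate

testargs = "prog --learning_rate 2e-4".split()
with patch.object(sys, "argv", testargs):
    result = main()
assert result == 2e-4
print("patched argv result:", result)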
| 712
|
class a :
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : str = "" , SCREAMING_SNAKE_CASE_ : bool = False ):
# Mapping from the first character of the prefix of the node
__lowerCamelCase: dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
__lowerCamelCase: str = is_leaf
__lowerCamelCase: Optional[int] = prefix
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ):
__lowerCamelCase: Optional[Any] = 0
for q, w in zip(self.prefix , SCREAMING_SNAKE_CASE_ ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : list[str] ):
for word in words:
self.insert(SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : str ):
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
__lowerCamelCase: Union[str, Any] = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
__lowerCamelCase: Any = RadixNode(prefix=SCREAMING_SNAKE_CASE_ , is_leaf=SCREAMING_SNAKE_CASE_ )
else:
__lowerCamelCase: Union[str, Any] = self.nodes[word[0]]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase: List[str] = incoming_node.match(
SCREAMING_SNAKE_CASE_ )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(SCREAMING_SNAKE_CASE_ )
# Case 4: The word is greater than or equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
__lowerCamelCase: List[Any] = remaining_prefix
__lowerCamelCase: Optional[Any] = self.nodes[matching_string[0]]
__lowerCamelCase: Optional[int] = RadixNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Union[str, Any] = aux_node
if remaining_word == "":
__lowerCamelCase: Optional[int] = True
else:
self.nodes[matching_string[0]].insert(SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : str ):
__lowerCamelCase: int = self.nodes.get(word[0] , SCREAMING_SNAKE_CASE_ )
if not incoming_node:
return False
else:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase: Any = incoming_node.match(
SCREAMING_SNAKE_CASE_ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : str , SCREAMING_SNAKE_CASE_ : str ):
__lowerCamelCase: str = self.nodes.get(word[0] , SCREAMING_SNAKE_CASE_ )
if not incoming_node:
return False
else:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase: Dict = incoming_node.match(
SCREAMING_SNAKE_CASE_ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(SCREAMING_SNAKE_CASE_ )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
__lowerCamelCase: List[Any] = list(self.nodes.values() )[0]
__lowerCamelCase: Any = merging_node.is_leaf
self.prefix += merging_node.prefix
__lowerCamelCase: Tuple = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
__lowerCamelCase: int = False
# If there is 1 edge, we merge it with its child
else:
__lowerCamelCase: Union[str, Any] = list(incoming_node.nodes.values() )[0]
__lowerCamelCase: List[str] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
__lowerCamelCase: Union[str, Any] = merging_node.nodes
return True
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int = 0 ):
if self.prefix != "":
print("""-""" * height , self.prefix , """ (leaf)""" if self.is_leaf else """""" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def __lowerCAmelCase ( ) -> bool:
__lowerCamelCase: Optional[int] = """banana bananas bandana band apple all beast""".split()
__lowerCamelCase: Optional[Any] = RadixNode()
root.insert_many(snake_case )
assert all(root.find(snake_case ) for word in words )
assert not root.find("""bandanas""" )
assert not root.find("""apps""" )
root.delete("""all""" )
assert not root.find("""all""" )
root.delete("""banana""" )
assert not root.find("""banana""" )
assert root.find("""bananas""" )
return True
def __lowerCAmelCase ( ) -> None:
assert test_trie()
def __lowerCAmelCase ( ) -> None:
__lowerCamelCase: int = RadixNode()
__lowerCamelCase: str = """banana bananas bandanas bandana band apple all beast""".split()
root.insert_many(snake_case )
print("""Words:""" , snake_case )
print("""Tree:""" )
root.print_tree()
if __name__ == "__main__":
main()
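Everything in this structure hinges on the prefix-match step near the top: compute the longest common prefix of the node's prefix and the word, then return it together with the two leftovers; which of the three parts is empty decides the insert/find/delete case. A standalone sketch of that computation:
def match(prefix: str, word: str) -> tuple:
    # Longest common prefix, plus what remains of the node prefix
    # and of the word.
    x = 0
    for q, w in zip(prefix, word):
        if q != w:
            break
        x += 1
    return prefix[:x], prefix[x:], word[x:]

print(match("banana", "bananas"))  # ('banana', '', 's')   -> descend with 's'
print(match("bandana", "band"))    # ('band', 'ana', '')   -> split the node
print(match("band", "beast"))      # ('b', 'and', 'east')  -> split and branch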
| 189
| 0
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    """Return `x` unchanged if it is already iterable, else duplicate it into a pair."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)


@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def check_model_from_pretrained_configs(
        self, text_config, vision_config, input_ids, attention_mask, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, vision_config, input_ids, attention_mask, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, vision_config, input_ids, attention_mask, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, vision_config, input_ids, attention_mask, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
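
# Sketch: converting the logits verified above into per-caption probabilities
# (values are the expected_logits from the integration test).
#
#   logits_per_image = np.array([[1.2284727, 0.3104122]])
#   probs = np.exp(logits_per_image) / np.exp(logits_per_image).sum(-1, keepdims=True)
#   # the first caption, "una foto di un gatto", scores highest for the cat image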
| 252
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers
            # require special treatment: we need to split them up into separate matrices/vectors
            # (the target key names follow the mapping established by rename_key above)
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers
            # require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
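
# Sketch of the fused-qkv split performed above, on a toy tensor (the size 4 is
# an illustrative assumption, not GroupViT's real hidden size):
#
#   dim = 4
#   fused = torch.randn(3 * dim, dim)   # rows stacked as [q; k; v]
#   q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
#   assert q.shape == k.shape == v.shape == (dim, dim)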
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    """Copy/paste/tweak the original GroupViT checkpoint's weights to the Transformers design."""
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()

    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
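
# Illustrative invocation (the script filename and checkpoint path are
# hypothetical; the flags match the parser defined above):
#
#   python convert_groupvit_checkpoint_to_pytorch.py \
#       --checkpoint_path ./group_vit_gcc_yfcc.pth \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
#       --model_name groupvit-gcc-yfcc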
| 689
| 0
|
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"

if is_tf_available():

    class ModelToSave(tf.keras.Model):
        """Bundles a TF tokenizer and a model so both export into one SavedModel."""

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)

            loaded_output = loaded_model(test_inputs)
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
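
# Usage sketch (illustrative): the point of TFBertTokenizer is that tokenization
# runs inside the TF graph, so an exported SavedModel can accept raw strings
# end to end.
#
#   tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
#   batch = tokenizer(tf.constant(["hello world"]))  # dict of int64 tensors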
| 711
|
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Return all primes below `limit` using the sieve of Eratosthenes."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Return the prime below `ceiling` expressible as the sum of the longest
    run of consecutive primes (Project Euler problem 50)."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol

    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
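
# Worked example (small ceiling): below 100 the longest run is the six
# consecutive primes 2 + 3 + 5 + 7 + 11 + 13 = 41, which is itself prime,
# so solution(100) == 41. Note that `sol in primes` is a linear scan over a
# list; building a `set(primes)` first would make the membership test O(1).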
| 266
| 0
|
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Load a comma-separated vocabulary file and an emoji JSON file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """Tokenizer for GPT-NeoX Japanese: comma-separated vocab with emoji handling and byte fallback."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens into a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)

        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    """Subword tokenizer with text cleaning, emoji replacement and byte fallback."""

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("\u3000", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
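
# Sketch of the byte-fallback round trip used above: characters missing from
# the vocab are emitted as <|byteN|> tokens and reassembled via a bytearray
# (the sample character is illustrative).
#
#   data = "猫".encode("utf-8")                 # b'\xe7\x8c\xab'
#   tokens = ["<|byte%d|>" % b for b in data]   # ['<|byte231|>', ...]
#   restored = bytearray(int(t[6:-2]) for t in tokens).decode("utf-8")
#   assert restored == "猫"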
| 13
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
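
# Sketch of what the `_LazyModule` indirection above buys: submodules are only
# imported on first attribute access (the import path is illustrative).
#
#   from transformers.models import electra   # cheap: nothing heavy imported yet
#   model_cls = electra.ElectraModel           # triggers the real import lazily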
| 444
| 0
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
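
# Illustrative effect of the shim: the old import path still works but warns.
#
#   from diffusers.pipeline_utils import DiffusionPipeline  # emits a deprecation warning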
| 56
|
'''simple docstring'''
import argparse
import torch
from transformers import (
    UniSpeechSatConfig,
    UniSpeechSatForAudioFrameClassification,
    UniSpeechSatForSequenceClassification,
    UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Copy/paste/tweak the s3prl checkpoint's weights to the Transformers design."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()

    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
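
# Illustrative invocation (the script filename and all paths are hypothetical;
# the flags match the parser defined above):
#
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./downstream.ckpt \
#       --model_dump_path ./converted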
| 56
| 1
|
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING
def _get_default_logging_level():
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that as the new
    default level. Otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current verbosity level of the library's root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level of the library's root logger."""
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set the verbosity to the `INFO` level."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to the `WARNING` level."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to the `DEBUG` level."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to the `ERROR` level."""
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library's log outputs."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library's log outputs."""
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any attribute lookup."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
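
# Usage sketch (illustrative; the exact module path depends on where this file
# lives in the package):
#
#   from datasets.utils import logging
#   logging.set_verbosity_info()
#   logger = logging.get_logger(__name__)
#   logger.info("now visible at INFO level")
#   logging.disable_progress_bar()  # route progress through the EmptyTqdm stub above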
| 483
|
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """
    A class replicating `transformers.BertConfig` with additional parameters for pruning/masking.
    """

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
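
# Usage sketch (illustrative): configure movement pruning with topK masking.
#
#   config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
#   assert config.hidden_size == 768  # BERT-base defaults are kept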
| 116
| 0
|
"""simple docstring"""
import math
import unittest
def is_prime(number: int) -> bool:
    """Check whether `number` is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
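
# Why checking 6k +/- 1 suffices: writing n = 6k + r with r in {0,...,5},
# r in {0, 2, 4} gives a multiple of 2 and r == 3 a multiple of 3, so any
# prime greater than 3 must have r in {1, 5}. Quick illustrative check:
#
#   assert all(p % 6 in (1, 5) for p in (5, 7, 11, 13, 17, 19, 23, 29))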
| 701
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
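# Usage note (added for illustration): with the `_LazyModule` pattern above, a
# statement like `from transformers.models.deit import DeiTConfig` only imports
# `configuration_deit` at attribute-access time, keeping `import transformers` fast.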
| 141
| 0
|
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 256
|
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self) -> Any:
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """True if the exact same node appears more than once while iterating."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
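# A hedged alternative sketch (not part of the original file): Floyd's
# tortoise-and-hare cycle detection finds a loop in O(n) time and O(1) space,
# instead of the O(n^2) visited-list scan used by `has_loop` above.
# `has_loop_floyd` is a hypothetical helper name introduced for illustration.
def has_loop_floyd(head: Node | None) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # one step
        fast = fast.next_node.next_node  # two steps
        if slow is fast:  # the pointers can only meet inside a cycle
            return True
    return False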
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
| 70
| 0
|
"""simple docstring"""
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
UpperCamelCase = """bert-base-cased"""
UpperCamelCase = """fp16"""
UpperCamelCase = """bf16"""
UpperCamelCase = [FPaa, BFaa]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )

    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))

    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))

    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)

    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)

    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)

    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i + 1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i + 1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i + 1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
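# Example invocation (added for illustration; flags mirror the ones assembled
# above, and the script path is hypothetical for a local checkout):
#   accelerate launch --num_processes=2 --num_machines=1 --machine_rank=0 \
#       --use_fsdp --mixed_precision=fp16 --fsdp_sharding_strategy=1 \
#       --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \
#       --fsdp_transformer_layer_cls_to_wrap=BertLayer \
#       scripts/external_deps/test_performance.py --output_dir=/tmp/fsdp --performance_lower_bound=0.82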
| 714
|
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
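# A minimal usage sketch (added for illustration, not part of the test file):
# PNDM first runs Runge-Kutta (PRK) warm-up steps, then linear multistep (PLMS)
# steps, which is why the tests above exercise step_prk and step_plms separately.
# In user code, the generic `scheduler.step(...)` dispatches to the right branch:
#
#     import torch
#     from diffusers import PNDMScheduler
#
#     scheduler = PNDMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     sample = torch.randn(1, 3, 32, 32)
#     for t in scheduler.timesteps:
#         model_output = torch.randn(1, 3, 32, 32)  # stand-in for a UNet call
#         sample = scheduler.step(model_output, t, sample).prev_sample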
| 612
| 0
|
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
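# Input format example (added for illustration): the NER reader above expects
# CoNLL-style files with one "token label(s)" row per line, blank lines between
# sentences, and optional -DOCSTART- markers, e.g.:
#
#   -DOCSTART- -X- -X- O
#
#   EU NNP B-NP B-ORG
#   rejects VBZ B-VP O
#   German JJ B-NP B-MISC
#
# With label_idx=-1 the NER task reads the last column; Chunk uses -2 for the
# second-to-last (chunk) column, and POS parses CoNLL-U files via parse_incr.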
| 104
|
'''simple docstring'''
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(f'Job {i:>2} is {job[0]} at {job[1]}')
| 158
| 0
|
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(F'''encoder.deit.blocks.{i}.attn.qkv.weight''')

        state_dict[F'''encoder.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[F'''encoder.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[F'''encoder.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg'''  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
    im = Image.open(requests.get(url, stream=True).raw).convert('''RGB''')
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''')

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = '''relu'''
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''', check_hash=True)['''model''']

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('''decoder''') and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained('''roberta-large''')
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors='''pt''').pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311])
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170])
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210])
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535])

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'''Saving processor to {pytorch_dump_folder_path}''')
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
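# Example invocation (added for illustration; the local script name and output
# path are hypothetical):
#   python convert_trocr_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten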
| 342
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/led-base-16384""": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 byte values to unicode strings, avoiding
    whitespace/control characters that the BPE code would otherwise trip on.
    """
    bs = (
        list(range(ord('''!'''), ord('''~''') + 1)) + list(range(ord('''¡'''), ord('''¬''') + 1)) + list(range(ord('''®'''), ord('''ÿ''') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding='''utf-8''') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='''utf-8''') as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''')

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('''inf''')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ''' '''.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(''' '''))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = ''''''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('''utf-8''', errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])

        with open(vocab_file, '''w''', encoding='''utf-8''') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '''\n''')

        index = 0
        with open(merge_file, '''w''', encoding='''utf-8''') as writer:
            writer.write('''#version: 0.2\n''')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''')
                    index = token_index
                writer.write(''' '''.join(bpe_tokens) + '''\n''')
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('''add_prefix_space''', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = '''attention_mask''' in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['''global_attention_mask''']) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['''global_attention_mask'''])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['''global_attention_mask'''] = (
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['''global_attention_mask'''] = [-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side))

        return encoded_inputs
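# Usage note (added for illustration): LED consumes an optional
# `global_attention_mask` alongside `input_ids`; the `_pad` override above pads
# it with -1 because 0 already means "local attention". A hedged sketch:
#
#     tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#     enc = tok("a long document ...", return_tensors="pt")
#     enc["global_attention_mask"] = [0] * len(enc["input_ids"][0])
#     enc["global_attention_mask"][0] = 1  # e.g. give the first token global attention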
| 342
| 1
|
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sorts `sequence[start..end]` (both inclusive) in place."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
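# Example (added for illustration): slowsort is deliberately inefficient
# ("multiply and surrender", far worse than O(n^2)), so keep inputs tiny:
#
#     seq = [9, 3, 7, 1]
#     slowsort(seq)
#     print(seq)  # [1, 3, 7, 9]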
| 542
|
import os
def solution():
    """Returns the total of all the name scores in the file."""
    with open(os.path.dirname(__file__) + '''/p022_names.txt''') as file:
        names = str(file.readlines()[0])
        names = names.replace('''"''', '''''').split(''',''')

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64  # ord("A") - 1 == 64, so "A" scores 1

        total_score += (i + 1) * name_score
        name_score = 0

    return total_score
if __name__ == "__main__":
print(solution())
| 114
| 0
|
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('''.'''):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''')

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''')
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith('''.*'''):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split('''.*.''')
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(F'''Unsupported task: {task}''')

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(F'''{name} was ignored''')
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == '''group''', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split('''.*.''')
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('''.''')[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index)
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    elif "running_mean" in name:
                        weight_type = '''running_mean'''
                    elif "running_var" in name:
                        weight_type = '''running_var'''
                    elif "num_batches_tracked" in name:
                        weight_type = '''num_batches_tracked'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(F'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('''conv_layers.''')[-1]
    items = name.split('''.''')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def _lowerCamelCase ( _a , _a , _a , _a=None , _a=None , _a=None , ):
"""simple docstring"""
if config_path is not None:
_lowerCamelCase = SpeechTaConfig.from_pretrained(_a )
else:
_lowerCamelCase = SpeechTaConfig()
if task == "s2t":
_lowerCamelCase = config.max_text_positions
_lowerCamelCase = SpeechTaForSpeechToText(_a )
elif task == "t2s":
_lowerCamelCase = 1876
_lowerCamelCase = 600
_lowerCamelCase = config.max_speech_positions
_lowerCamelCase = SpeechTaForTextToSpeech(_a )
elif task == "s2s":
_lowerCamelCase = 1876
_lowerCamelCase = config.max_speech_positions
_lowerCamelCase = SpeechTaForSpeechToSpeech(_a )
else:
raise ValueError(F'''Unknown task name: {task}''' )
if vocab_path:
_lowerCamelCase = SpeechTaTokenizer(_a , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
_lowerCamelCase = AddedToken('''<mask>''' , lstrip=True , rstrip=False )
_lowerCamelCase = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
_lowerCamelCase = SpeechTaFeatureExtractor()
_lowerCamelCase = SpeechTaProcessor(tokenizer=_a , feature_extractor=_a )
processor.save_pretrained(_a )
_lowerCamelCase = torch.load(_a )
recursively_load_weights(fairseq_checkpoint['''model'''] , _a , _a )
model.save_pretrained(_a )
if repo_id:
print('''Pushing to the hub...''' )
processor.push_to_hub(_a )
model.push_to_hub(_a )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
_UpperCAmelCase = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
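# --- Illustrative sketch (not part of the original script): how the wildcard
# entries in MAPPING above are resolved. The example key/mapped_key in the
# comment below are invented stand-ins; only the ".*." split/replace technique
# mirrors the loop in recursively_load_weights.
def resolve_mapped_key(name, key, mapped_key):
    if "*" in key:
        prefix, suffix = key.split(".*.")
        if prefix not in name or suffix not in name:
            return None
        key = suffix  # match on the suffix from here on, as the loop above does
    elif key not in name:
        return None
    if "*" in mapped_key:
        layer_index = name.split(key)[0].split(".")[-2]
        mapped_key = mapped_key.replace("*", layer_index)
    return mapped_key

# resolve_mapped_key("encoder.layers.3.self_attn.k_proj.weight",
#                    "encoder.layers.*.self_attn",
#                    "wrapped_encoder.layers.*.attention")
# -> "wrapped_encoder.layers.3.attention"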
| 297
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig( PretrainedConfig ):
"""simple docstring"""
model_type = "xmod"
def __init__( self , a__=30522 , a__=768 , a__=12 , a__=12 , a__=3072 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=2 , a__=0.02 , a__=1e-12 , a__=1 , a__=0 , a__=2 , a__="absolute" , a__=True , a__=None , a__=False , a__=2 , a__=False , a__=True , a__=True , a__=("en_XX",) , a__=None , **a__ , ):
super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__ )
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = hidden_act
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = initializer_range
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = position_embedding_type
_lowerCamelCase = use_cache
_lowerCamelCase = classifier_dropout
_lowerCamelCase = pre_norm
_lowerCamelCase = adapter_reduction_factor
_lowerCamelCase = adapter_layer_norm
_lowerCamelCase = adapter_reuse_layer_norm
_lowerCamelCase = ln_before_adapter
_lowerCamelCase = list(a__ )
_lowerCamelCase = default_language
class XmodOnnxConfig( OnnxConfig ):
"""simple docstring"""
@property
def inputs( self ):
if self.task == "multiple-choice":
dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
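# --- Illustrative sketch (not in the original file): how the `inputs` mapping
# above is consumed by torch.onnx.export. The dummy shapes, output path, and
# opset version are arbitrary choices, not values taken from this file.
import torch

def export_with_dynamic_axes(model, onnx_config, output_path="xmod.onnx"):
    dynamic_axes = dict(onnx_config.inputs)  # {"input_ids": {0: "batch", 1: "sequence"}, ...}
    dummy_input_ids = torch.ones(2, 8, dtype=torch.long)
    torch.onnx.export(
        model,
        (dummy_input_ids, torch.ones_like(dummy_input_ids)),
        output_path,
        input_names=list(dynamic_axes),
        dynamic_axes=dynamic_axes,
        opset_version=14,
    )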
| 297
| 1
|
'''simple docstring'''
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
import boto3 # noqa: F401
def _create_iam_role_for_sagemaker( role_name ) -> None:
iam_client = boto3.client("iam" )
trust_policy = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=role_name ,AssumeRolePolicyDocument=json.dumps(trust_policy ,indent=2 ) )
access_policy = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=role_name ,PolicyName=F'''{role_name}_policy_permission''' ,PolicyDocument=json.dumps(access_policy ,indent=2 ) ,)
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def _A ( A ) -> int:
lowercase : int = botoa.client("iam" )
return iam_client.get_role(RoleName=_A )["Role"]["Arn"]
def _A ( ) -> Any:
lowercase : Tuple = _ask_options(
"How do you want to authorize?" ,["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] ,_A ,)
lowercase : str = None
if credentials_configuration == 0:
lowercase : str = _ask_field("Enter your AWS Profile name: [default] " ,default="default" )
lowercase : List[str] = aws_profile
else:
print(
"Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
"`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
lowercase : Dict = _ask_field("AWS Access Key ID: " )
lowercase : Any = aws_access_key_id
lowercase : List[str] = _ask_field("AWS Secret Access Key: " )
lowercase : List[Any] = aws_secret_access_key
lowercase : Any = _ask_field("Enter your AWS Region: [us-east-1]" ,default="us-east-1" )
lowercase : List[Any] = aws_region
lowercase : Optional[int] = _ask_options(
"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" ,["Provide IAM Role name", "Create new IAM role using credentials"] ,_A ,)
if role_management == 0:
lowercase : int = _ask_field("Enter your IAM role name: " )
else:
lowercase : List[str] = """accelerate_sagemaker_execution_role"""
print(F'''Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials''' )
_create_iam_role_for_sagemaker(_A )
lowercase : str = _ask_field(
"Do you want to use custom Docker image? [yes/NO]: " ,_convert_yes_no_to_bool ,default=_A ,error_message="Please enter yes or no." ,)
lowercase : int = None
if is_custom_docker_image:
lowercase : Tuple = _ask_field("Enter your Docker image: " ,lambda A : str(_A ).lower() )
lowercase : str = _ask_field(
"Do you want to provide SageMaker input channels with data locations? [yes/NO]: " ,_convert_yes_no_to_bool ,default=_A ,error_message="Please enter yes or no." ,)
lowercase : Union[str, Any] = None
if is_sagemaker_inputs_enabled:
lowercase : Any = _ask_field(
"Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " ,lambda A : str(_A ).lower() ,)
lowercase : Optional[int] = _ask_field(
"Do you want to enable SageMaker metrics? [yes/NO]: " ,_convert_yes_no_to_bool ,default=_A ,error_message="Please enter yes or no." ,)
lowercase : Optional[Any] = None
if is_sagemaker_metrics_enabled:
lowercase : Tuple = _ask_field(
"Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " ,lambda A : str(_A ).lower() ,)
lowercase : Dict = _ask_options(
"What is the distributed mode?" ,["No distributed training", "Data parallelism"] ,_convert_sagemaker_distributed_mode ,)
lowercase : List[Any] = {}
lowercase : str = _ask_field(
"Do you wish to optimize your script with torch dynamo?[yes/NO]:" ,_convert_yes_no_to_bool ,default=_A ,error_message="Please enter yes or no." ,)
if use_dynamo:
lowercase : List[str] = """dynamo_"""
lowercase : int = _ask_options(
"Which dynamo backend would you like to use?" ,[x.lower() for x in DYNAMO_BACKENDS] ,_convert_dynamo_backend ,default=2 ,)
lowercase : Tuple = _ask_field(
"Do you want to customize the defaults sent to torch.compile? [yes/NO]: " ,_convert_yes_no_to_bool ,default=_A ,error_message="Please enter yes or no." ,)
if use_custom_options:
lowercase : Optional[int] = _ask_options(
"Which mode do you want to use?" ,_A ,lambda A : TORCH_DYNAMO_MODES[int(_A )] ,default="default" ,)
lowercase : Union[str, Any] = _ask_field(
"Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: " ,_convert_yes_no_to_bool ,default=_A ,error_message="Please enter yes or no." ,)
lowercase : List[str] = _ask_field(
"Do you want to enable dynamic shape tracing? [yes/NO]: " ,_convert_yes_no_to_bool ,default=_A ,error_message="Please enter yes or no." ,)
lowercase : Union[str, Any] = """Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
lowercase : Optional[int] = _ask_options(
_A ,_A ,lambda A : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(_A )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
lowercase : Any = _ask_field(_A ,lambda A : str(_A ).lower() ,default="ml.p3.2xlarge" )
lowercase : int = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
lowercase : Union[str, Any] = _ask_field(
"How many machines do you want use? [1]: " ,_A ,default=1 ,)
lowercase : List[str] = _ask_options(
"Do you wish to use FP16 or BF16 (mixed precision)?" ,["no", "fp16", "bf16", "fp8"] ,_convert_mixed_precision ,)
if use_dynamo and mixed_precision == "no":
print(
"Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." )
return SageMakerConfig(
image_uri=_A ,compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER ,distributed_type=_A ,use_cpu=_A ,dynamo_config=_A ,eca_instance_type=_A ,profile=_A ,region=_A ,iam_role_name=_A ,mixed_precision=_A ,num_machines=_A ,sagemaker_inputs_file=_A ,sagemaker_metrics_file=_A ,)
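# --- Illustrative sketch (not in the original file): the ask-and-convert pattern
# the questionnaire above is built on. accelerate's real `_ask_field` is more
# elaborate; this stand-in only shows the retry-until-convertible loop.
def ask_field(prompt, convert=str, default=None, error_message="Invalid input."):
    while True:
        raw = input(prompt)
        if not raw and default is not None:
            return default
        try:
            return convert(raw)
        except ValueError:
            print(error_message)

def convert_yes_no_to_bool(value: str) -> bool:
    if value.lower() not in ("yes", "no", "y", "n"):
        raise ValueError("Please enter yes or no.")
    return value.lower() in ("yes", "y")

# use_docker = ask_field("Do you want to use a custom Docker image? [yes/NO]: ",
#                        convert_yes_no_to_bool, default=False)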
| 372
|
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'b0': efficientnet.EfficientNetB0,
'b1': efficientnet.EfficientNetB1,
'b2': efficientnet.EfficientNetB2,
'b3': efficientnet.EfficientNetB3,
'b4': efficientnet.EfficientNetB4,
'b5': efficientnet.EfficientNetB5,
'b6': efficientnet.EfficientNetB6,
'b7': efficientnet.EfficientNetB7,
}
_lowerCAmelCase = {
'b0': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 224,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 240,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 1408,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 260,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 1536,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 300,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 1792,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 380,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 2048,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 456,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 2304,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 528,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 2560,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 600,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def UpperCamelCase ( _A ) -> List[str]:
lowercase : List[str] = EfficientNetConfig()
lowercase : Any = CONFIG_MAP[model_name]["""hidden_dim"""]
lowercase : List[str] = CONFIG_MAP[model_name]["""width_coef"""]
lowercase : str = CONFIG_MAP[model_name]["""depth_coef"""]
lowercase : int = CONFIG_MAP[model_name]["""image_size"""]
lowercase : List[Any] = CONFIG_MAP[model_name]["""dropout_rate"""]
lowercase : int = CONFIG_MAP[model_name]["""dw_padding"""]
lowercase : Optional[int] = """huggingface/label-files"""
lowercase : int = """imagenet-1k-id2label.json"""
lowercase : Any = 1_000
lowercase : Any = json.load(open(hf_hub_download(_A , _A , repo_type="""dataset""" ) , """r""" ) )
lowercase : Optional[int] = {int(k ): v for k, v in idalabel.items()}
lowercase : int = idalabel
lowercase : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def UpperCamelCase ( ) -> Tuple:
lowercase : Any = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase : Optional[int] = Image.open(requests.get(_A , stream=_A ).raw )
return im
def UpperCamelCase ( _A ) -> Optional[Any]:
lowercase : str = CONFIG_MAP[model_name]["""image_size"""]
lowercase : Optional[int] = EfficientNetImageProcessor(
size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=False , )
return preprocessor
def UpperCamelCase ( _A ) -> Optional[int]:
lowercase : Union[str, Any] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
lowercase : Optional[Any] = sorted(set(_A ) )
lowercase : Dict = len(_A )
lowercase : List[str] = {b: str(i ) for b, i in zip(_A , range(_A ) )}
rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
lowercase : str = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
key_mapping = {}
for item in rename_keys:
if item[0] in original_param_names:
key_mapping[item[0]] = """efficientnet.""" + item[1]
key_mapping["""predictions/kernel:0"""] = """classifier.weight"""
key_mapping["""predictions/bias:0"""] = """classifier.bias"""
return key_mapping
def UpperCamelCase ( _A , _A , _A ) -> Optional[Any]:
for key, value in tf_params.items():
if "normalization" in key:
continue
lowercase : List[Any] = key_mapping[key]
if "_conv" in key and "kernel" in key:
lowercase : str = torch.from_numpy(_A ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
lowercase : Optional[int] = torch.from_numpy(_A ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
lowercase : List[Any] = torch.from_numpy(np.transpose(_A ) )
else:
lowercase : Optional[int] = torch.from_numpy(_A )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(_A )
@torch.no_grad()
def UpperCamelCase ( _A , _A , _A , _A ) -> str:
lowercase : Any = model_classes[model_name](
include_top=_A , weights="""imagenet""" , input_tensor=_A , input_shape=_A , pooling=_A , classes=1_000 , classifier_activation="""softmax""" , )
lowercase : Dict = original_model.trainable_variables
lowercase : Any = original_model.non_trainable_variables
lowercase : Any = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
lowercase : Dict = param.numpy()
lowercase : List[str] = list(tf_params.keys() )
# Load HuggingFace model
lowercase : str = get_efficientnet_config(_A )
lowercase : List[Any] = EfficientNetForImageClassification(_A ).eval()
lowercase : Optional[int] = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("""Converting parameters...""" )
lowercase : int = rename_keys(_A )
replace_params(_A , _A , _A )
# Initialize preprocessor and preprocess input image
lowercase : Optional[int] = convert_image_processor(_A )
lowercase : Any = preprocessor(images=prepare_img() , return_tensors="""pt""" )
# HF model inference
hf_model.eval()
with torch.no_grad():
lowercase : Union[str, Any] = hf_model(**_A )
lowercase : List[Any] = outputs.logits.detach().numpy()
# Original model inference
lowercase : Optional[Any] = False
lowercase : str = CONFIG_MAP[model_name]["""image_size"""]
lowercase : Optional[Any] = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
lowercase : Optional[Any] = image.img_to_array(_A )
lowercase : Dict = np.expand_dims(_A , axis=0 )
lowercase : List[str] = original_model.predict(_A )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_A , _A , atol=1e-3 ), "The predicted logits are not the same."
print("""Model outputs match!""" )
if save_model:
# Create folder to save model
if not os.path.isdir(_A ):
os.mkdir(_A )
# Save converted model and image processor
hf_model.save_pretrained(_A )
preprocessor.save_pretrained(_A )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
lowercase : Dict = F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(_A )
hf_model.push_to_hub(_A )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
_lowerCAmelCase = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
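# --- Illustrative sketch (not part of the original script): the kernel layout
# conversion that replace_params performs above. Keras stores conv kernels as
# (H, W, in, out); PyTorch expects (out, in, H, W). Depthwise kernels are stored
# as (H, W, in, multiplier) and need a different permutation. Shapes are arbitrary.
import numpy as np
import torch

tf_kernel = np.random.rand(3, 3, 16, 32)              # HWIO, as saved by Keras
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
assert pt_kernel.shape == (32, 16, 3, 3)              # OIHW, as nn.Conv2d expects

tf_dw_kernel = np.random.rand(3, 3, 16, 1)            # depthwise (H, W, in, multiplier)
pt_dw_kernel = torch.from_numpy(tf_dw_kernel).permute(2, 3, 0, 1)
assert pt_dw_kernel.shape == (16, 1, 3, 3)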
| 264
| 0
|
import random


def _partition(data: list, pivot) -> tuple:
    """Three-way partition of ``data`` around ``pivot``."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the ``index``-th smallest element of ``items`` (0-based), or None."""
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
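if __name__ == "__main__":
    # Usage sketch (not in the original file): order statistics via quick_select.
    items = [2, 7, 1, 9, 4]
    assert quick_select(items, 0) == 1                # minimum
    assert quick_select(items, len(items) // 2) == 4  # median
    assert quick_select(items, len(items) - 1) == 9   # maximum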
| 484
|
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Insert in descending order so that the list reads ascending from the head
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 484
| 1
|
import logging

import numpy as np
import pytest
from scipy.linalg import eigh

logging.basicConfig(level=logging.INFO, format="%(message)s")


def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between multiple classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first `dimensions`
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any:
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
    assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError


if __name__ == "__main__":
    import doctest

    doctest.testmod()
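if __name__ == "__main__":
    # Worked example (not in the original file): features are rows and samples are
    # columns, as the functions above assume; project 3 features down to 2.
    demo_features = np.array(
        [[1.0, 2.0, 3.0, 4.0, 5.0], [2.0, 3.0, 4.0, 5.0, 6.0], [3.0, 4.0, 5.0, 6.0, 7.0]]
    )
    print(principal_component_analysis(demo_features, 2))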
| 288
|
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor( ProcessorMixin ):
attributes = ["image_processor", "tokenizer"]
image_processor_class = "OwlViTImageProcessor"
tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , UpperCamelCase__ , )
A_ = kwargs.pop("""feature_extractor""" )
A_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="max_length" , UpperCamelCase__="np" , **UpperCamelCase__ ) -> int:
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) or (isinstance(UpperCamelCase__ , UpperCamelCase__ ) and not isinstance(text[0] , UpperCamelCase__ )):
A_ = [self.tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )]
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ) and isinstance(text[0] , UpperCamelCase__ ):
A_ = []
# Maximum number of queries across batch
A_ = max([len(UpperCamelCase__ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(UpperCamelCase__ ) != max_num_queries:
A_ = t + [""" """] * (max_num_queries - len(UpperCamelCase__ ))
A_ = self.tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
encodings.append(UpperCamelCase__ )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
A_ = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
A_ = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
A_ = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
A_ = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
A_ = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
A_ = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
A_ = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
A_ = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
A_ = BatchEncoding()
A_ = input_ids
A_ = attention_mask
if query_images is not None:
A_ = BatchEncoding()
A_ = self.image_processor(
UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ).pixel_values
A_ = query_pixel_values
if images is not None:
A_ = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None and images is not None:
A_ = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
A_ = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
return self.image_processor.post_process(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
return self.image_processor.post_process_object_detection(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , UpperCamelCase__ , )
return self.image_processor_class
@property
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , UpperCamelCase__ , )
return self.image_processor
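# --- Usage sketch (not in the original file): driving this processor end to end.
# The checkpoint name is the commonly published OWL-ViT one; verify it before use.
# from PIL import Image
# import requests
# from transformers import OwlViTProcessor, OwlViTForObjectDetection
#
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")
# url = "http://images.cocodataset.org/val2017/000000039769.jpg"
# image = Image.open(requests.get(url, stream=True).raw)
# inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                    images=image, return_tensors="pt")
# outputs = model(**inputs)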
| 288
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase : Optional[Any] ={
"""configuration_bridgetower""": [
"""BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BridgeTowerConfig""",
"""BridgeTowerTextConfig""",
"""BridgeTowerVisionConfig""",
],
"""processing_bridgetower""": ["""BridgeTowerProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] =["""BridgeTowerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : str =[
"""BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BridgeTowerForContrastiveLearning""",
"""BridgeTowerForImageAndTextRetrieval""",
"""BridgeTowerForMaskedLM""",
"""BridgeTowerModel""",
"""BridgeTowerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
UpperCAmelCase : List[str] =_LazyModule(__name__, globals()["""__file__"""], _import_structure)
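# --- Illustrative sketch (not in the original file): the lazy-import idea behind
# _LazyModule, reduced to a plain PEP 562 module-level __getattr__. The attribute
# map below is a placeholder.
# import importlib
#
# _LAZY_ATTRS = {"BridgeTowerConfig": ".configuration_bridgetower"}
#
# def __getattr__(name):
#     if name in _LAZY_ATTRS:
#         module = importlib.import_module(_LAZY_ATTRS[name], __name__)
#         return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")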
| 715
|
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
| 504
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : List[Any] ={"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any =["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Tuple =[
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : int =_LazyModule(__name__, globals()["__file__"], _import_structure)
| 440
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput( BaseOutput ):
latents: torch.FloatTensor
class VQModel( ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self , in_channels :int = 3 , out_channels :int = 3 , down_block_types :Tuple[str] = ("DownEncoderBlock2D",) , up_block_types :Tuple[str] = ("UpDecoderBlock2D",) , block_out_channels :Tuple[int] = (64,) , layers_per_block :int = 1 , act_fn :str = "silu" , latent_channels :int = 3 , sample_size :int = 32 , num_vq_embeddings :int = 256 , norm_num_groups :int = 32 , vq_embed_dim :Optional[int] = None , scaling_factor :float = 0.18215 , norm_type :str = "group" , ):
super().__init__()
# pass init params to Encoder
self.encoder = Encoder(
in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1 )
self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False )
self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1 )
# pass init params to Decoder
self.decoder = Decoder(
in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )
@apply_forward_hook
def encode( self , x :torch.FloatTensor , return_dict :bool = True )-> VQEncoderOutput:
h = self.encoder(x )
h = self.quant_conv(h )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=h )
@apply_forward_hook
def decode( self , h :torch.FloatTensor , force_not_quantize :bool = False , return_dict :bool = True )-> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
quant , emb_loss , info = self.quantize(h )
else:
quant = h
quant2 = self.post_quant_conv(quant )
dec = self.decoder(quant2 , quant if self.config.norm_type == "spatial" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=dec )
def forward( self , sample :torch.FloatTensor , return_dict :bool = True )-> Union[DecoderOutput, torch.FloatTensor]:
x = sample
h = self.encode(x ).latents
dec = self.decode(h ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=dec )
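# --- Usage sketch (not in the original file): an encode -> quantize -> decode
# round trip. This class is published in diffusers as `VQModel` (an assumption
# here); the constructor arguments below are illustrative.
# import torch
# from diffusers import VQModel
#
# vq = VQModel(in_channels=3, out_channels=3, latent_channels=3, num_vq_embeddings=256)
# sample = torch.randn(1, 3, 32, 32)
# latents = vq.encode(sample).latents   # continuous latents, before quantization
# recon = vq.decode(latents).sample     # decode snaps latents to the codebook first
# assert recon.shape == sample.shape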
| 440
| 1
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class lowerCamelCase (a__ ):
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
_snake_case : Union[str, Any] = tempfile.mkdtemp()
_snake_case : Union[str, Any] = 8
# DPR tok
_snake_case : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_snake_case : List[Any] = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(lowercase__ , exist_ok=lowercase__ )
_snake_case : Union[str, Any] = os.path.join(lowercase__ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
_snake_case : Optional[int] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_snake_case : str = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
_snake_case : Optional[int] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_snake_case : str = {'''unk_token''': '''<unk>'''}
_snake_case : Union[str, Any] = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(lowercase__ , exist_ok=lowercase__ )
_snake_case : Dict = os.path.join(lowercase__ , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
_snake_case : List[str] = os.path.join(lowercase__ , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowercase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowercase__ ) )
def UpperCAmelCase_ ( self ) -> DPRQuestionEncoderTokenizer:
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def UpperCAmelCase_ ( self ) -> BartTokenizer:
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def UpperCAmelCase_ ( self ) -> Any:
"""simple docstring"""
_snake_case : List[str] = os.path.join(self.tmpdirname , '''rag_tokenizer''' )
_snake_case : Optional[Any] = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
_snake_case : int = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(lowercase__ )
rag_tokenizer.save_pretrained(lowercase__ )
_snake_case : Optional[Any] = RagTokenizer.from_pretrained(lowercase__ , config=lowercase__ )
self.assertIsInstance(new_rag_tokenizer.question_encoder , lowercase__ )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , lowercase__ )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
_snake_case : Union[str, Any] = RagTokenizer.from_pretrained('''facebook/rag-token-nq''' )
_snake_case : int = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
_snake_case : Optional[Any] = tokenizer(lowercase__ )
self.assertIsNotNone(lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
_snake_case : Optional[Any] = RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''' )
_snake_case : str = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
_snake_case : Any = tokenizer(lowercase__ )
self.assertIsNotNone(lowercase__ )
| 47
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowerCamelCase :
_lowercase : Any = LEDConfig
_lowercase : Any = {}
_lowercase : Optional[Any] = """gelu"""
def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=False , lowercase__=99 , lowercase__=32 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=20 , lowercase__=2 , lowercase__=1 , lowercase__=0 , lowercase__=4 , ) -> Any:
"""simple docstring"""
_snake_case : Dict = parent
_snake_case : Any = batch_size
_snake_case : List[str] = seq_length
_snake_case : Union[str, Any] = is_training
_snake_case : Tuple = use_labels
_snake_case : int = vocab_size
_snake_case : str = hidden_size
_snake_case : Optional[Any] = num_hidden_layers
_snake_case : List[Any] = num_attention_heads
_snake_case : Optional[int] = intermediate_size
_snake_case : List[Any] = hidden_dropout_prob
_snake_case : List[str] = attention_probs_dropout_prob
_snake_case : Optional[int] = max_position_embeddings
_snake_case : Any = eos_token_id
_snake_case : List[Any] = pad_token_id
_snake_case : Optional[int] = bos_token_id
_snake_case : Any = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_snake_case : Any = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_snake_case : Tuple = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def UpperCAmelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_snake_case : Dict = prepare_led_inputs_dict(lowercase__ , lowercase__ , lowercase__ )
_snake_case : Dict = tf.concat(
[tf.zeros_like(lowercase__ )[:, :-1], tf.ones_like(lowercase__ )[:, -1:]] , axis=-1 , )
_snake_case : Dict = global_attention_mask
return config, inputs_dict
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> int:
"""simple docstring"""
_snake_case : int = TFLEDModel(config=lowercase__ ).get_decoder()
_snake_case : Union[str, Any] = inputs_dict['''input_ids''']
_snake_case : List[str] = input_ids[:1, :]
_snake_case : Tuple = inputs_dict['''attention_mask'''][:1, :]
_snake_case : Dict = 1
# first forward pass
_snake_case : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__ )
_snake_case , _snake_case : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case : List[Any] = model(lowercase__ , attention_mask=lowercase__ )[0]
_snake_case : Tuple = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case : Tuple = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case : int = output_from_no_past[:, -3:, random_slice_idx]
_snake_case : Optional[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase__ , lowercase__ , rtol=1E-3 )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ):
"""simple docstring"""
if attention_mask is None:
_snake_case : Union[str, Any] = tf.cast(tf.math.not_equal(lowerCAmelCase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_snake_case : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_snake_case : int = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_snake_case : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class lowerCamelCase (a__ , a__ , unittest.TestCase ):
_lowercase : Optional[int] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_lowercase : int = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_lowercase : Dict = (
{
"""conversational""": TFLEDForConditionalGeneration,
"""feature-extraction""": TFLEDModel,
"""summarization""": TFLEDForConditionalGeneration,
"""text2text-generation""": TFLEDForConditionalGeneration,
"""translation""": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowercase : int = True
_lowercase : List[Any] = False
_lowercase : str = False
_lowercase : Union[str, Any] = False
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_snake_case : str = TFLEDModelTester(self )
_snake_case : Union[str, Any] = ConfigTester(self , config_class=lowercase__ )
def UpperCAmelCase_ ( self ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ )
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: head masking is not yet implemented for LED
        pass
def _long_tensor(tok_lst):
    """Build an int32 constant tensor from a nested list of token ids."""
    return tf.constant(tok_lst, dtype=tf.int32)
TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)
    def test_inference_with_lm_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
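# A minimal sketch (not from the tests above) of how a LED-style global
# attention mask is typically built: the first `num_global` positions attend
# globally, mirroring what test_attention_outputs constructs inline.
# `_demo_seq_length` and `_demo_num_global` are illustrative values.
_demo_seq_length, _demo_num_global = 16, 2
_demo_global_attention_mask = tf.where(
    tf.range(_demo_seq_length)[None, :] < _demo_num_global,
    1,
    tf.zeros((1, _demo_seq_length), dtype=tf.int32),
)
assert int(tf.reduce_sum(_demo_global_attention_mask)) == _demo_num_global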
| 47
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__snake_case : List[str] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        do_convert_rgb=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
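# A short usage sketch (illustrative, not part of the processor API), assuming
# PIL is available; the 300x200 random RGB image is synthetic test data.
if __name__ == "__main__":
    from PIL import Image as _Image

    _image = _Image.fromarray(np.random.randint(0, 256, (200, 300, 3), dtype=np.uint8))
    _processor = CLIPImageProcessor()
    _batch = _processor(images=_image, return_tensors="np")
    # shortest edge resized to 224, then a 224x224 center crop
    assert _batch["pixel_values"].shape == (1, 3, 224, 224)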
| 131
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
__snake_case : int = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
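# A small sketch of the adaptive-softmax bookkeeping above: with the default
# cutoffs there are four clusters (head + three tails), and
# proj_share_all_but_first=True ties every projection except the first.
if __name__ == "__main__":
    _config = TransfoXLConfig()
    assert _config.cutoffs == [20000, 40000, 200000]
    assert _config.tie_projs == [False, True, True, True]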
| 131
| 1
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=new_model_command_factory)
    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )
        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax
_lowerCamelCase = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
os.makedirs(lowercase_ , exist_ok=lowercase_ )
os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=lowercase_ )
# Tests require submodules as they have parent imports
with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , 'w' ):
pass
shutil.move(
f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , )
shutil.move(
f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)
        def skip_units(line):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)
replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" )
        os.rmdir(directory)
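# The `replace` helper above follows a classic safe in-place edit pattern:
# write to a temp file, copy the original permissions, then move the temp file
# over the original. A standalone sketch of the same idea (function and
# parameter names are illustrative, not part of the command):
def _insert_below_marker(path, marker, new_lines):
    from shutil import copymode, move
    from tempfile import mkstemp

    fd, tmp_path = mkstemp()
    with os.fdopen(fd, "w") as new_file, open(path) as old_file:
        for line in old_file:
            new_file.write(line)
            if marker in line:
                new_file.writelines(new_lines)
    copymode(path, tmp_path)  # keep the original file permissions
    os.remove(path)
    move(tmp_path, path)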
| 708
|
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
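# A usage sketch for pack_examples with a stand-in "tokenizer": the fake below
# mimics only the tok(text, return_tensors="pt").input_ids.shape[1] interface
# by counting whitespace-separated tokens, so no model download is needed.
def _demo_pack_examples():
    from types import SimpleNamespace

    def fake_tok(text, return_tensors=None):
        return SimpleNamespace(input_ids=SimpleNamespace(shape=(1, len(text.split()))))

    src, tgt = pack_examples(fake_tok, ["a b", "c d", "e f g h i"], ["1", "2", "3"], max_tokens=5)
    assert src == ["a b c d", "e f g h i"] and tgt == ["1 2", "3"]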
| 234
| 0
|
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
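# Beyond the determinant identity det(M) = det(A) * det(S) tested above, the
# Schur complement S = C - B^T A^{-1} B is what makes block elimination work.
# A small self-check on synthetic 2x2 / 2x1 / 1x1 blocks:
def _demo_schur_complement():
    a = np.array([[4.0, 1.0], [1.0, 3.0]])
    b = np.array([[1.0], [2.0]])
    c = np.array([[6.0]])
    m = np.block([[a, b], [b.T, c]])
    s = schur_complement(a, b, c)
    assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))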
| 317
|
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 651
| 0
|
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
_SCREAMING_SNAKE_CASE = "src/transformers"
_SCREAMING_SNAKE_CASE = "docs/source/en"
_SCREAMING_SNAKE_CASE = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between `start_prompt` and `end_prompt`, with its start/end indices."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_SCREAMING_SNAKE_CASE = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_SCREAMING_SNAKE_CASE = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_SCREAMING_SNAKE_CASE = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
_SCREAMING_SNAKE_CASE = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
_SCREAMING_SNAKE_CASE = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased name into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generate an up-to-date model table from the content of the auto modules."""
_lowerCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_lowerCAmelCase = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
_lowerCAmelCase = {name: config.replace("Config" , "" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
_lowerCAmelCase = collections.defaultdict(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = collections.defaultdict(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = collections.defaultdict(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = collections.defaultdict(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = collections.defaultdict(SCREAMING_SNAKE_CASE_ )
# Let's lookup through all transformers object (once).
for attr_name in dir(SCREAMING_SNAKE_CASE_ ):
_lowerCAmelCase = None
if attr_name.endswith("Tokenizer" ):
_lowerCAmelCase = slow_tokenizers
_lowerCAmelCase = attr_name[:-9]
elif attr_name.endswith("TokenizerFast" ):
_lowerCAmelCase = fast_tokenizers
_lowerCAmelCase = attr_name[:-13]
elif _re_tf_models.match(SCREAMING_SNAKE_CASE_ ) is not None:
_lowerCAmelCase = tf_models
_lowerCAmelCase = _re_tf_models.match(SCREAMING_SNAKE_CASE_ ).groups()[0]
elif _re_flax_models.match(SCREAMING_SNAKE_CASE_ ) is not None:
_lowerCAmelCase = flax_models
_lowerCAmelCase = _re_flax_models.match(SCREAMING_SNAKE_CASE_ ).groups()[0]
elif _re_pt_models.match(SCREAMING_SNAKE_CASE_ ) is not None:
_lowerCAmelCase = pt_models
_lowerCAmelCase = _re_pt_models.match(SCREAMING_SNAKE_CASE_ ).groups()[0]
if lookup_dict is not None:
while len(SCREAMING_SNAKE_CASE_ ) > 0:
if attr_name in model_name_to_prefix.values():
_lowerCAmelCase = True
break
# Try again after removing the last word in the name
_lowerCAmelCase = "".join(camel_case_split(SCREAMING_SNAKE_CASE_ )[:-1] )
# Let's build that table!
_lowerCAmelCase = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
_lowerCAmelCase = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
_lowerCAmelCase = [len(SCREAMING_SNAKE_CASE_ ) + 2 for c in columns]
_lowerCAmelCase = max([len(SCREAMING_SNAKE_CASE_ ) for name in model_names] ) + 2
# Build the table per se
_lowerCAmelCase = "|" + "|".join([_center_text(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for c, w in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] ) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
_lowerCAmelCase = {True: "✅", False: "❌"}
for name in model_names:
_lowerCAmelCase = model_name_to_prefix[name]
_lowerCAmelCase = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for l, w in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] ) + "|\n"
return table
def check_model_table(overwrite=False):
    """Check the model table in index.md is consistent with the state of the lib and maybe `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_table(args.fix_and_overwrite)
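# Quick illustration of the two helpers above on arbitrary sample inputs
# (the strings are examples, not taken from the model table):
def _demo_helpers():
    assert camel_case_split("TFBertModel") == ["TF", "Bert", "Model"]
    assert _center_text("ok", 6) == "  ok  "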
| 709
|
'''simple docstring'''
def check_bouncy(n: int) -> bool:
    """Return True if `n` is a bouncy number (digits neither increasing nor decreasing)."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers reaches `percent`%."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''')
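# Worked examples for the functions above: a number is "bouncy" when its digits
# are neither monotonically increasing nor decreasing (Project Euler 112); 538
# is the first point at which half of all numbers are bouncy.
def _demo_bouncy():
    assert check_bouncy(123) is False  # increasing digits
    assert check_bouncy(321) is False  # decreasing digits
    assert check_bouncy(538) is True   # 5 > 3 < 8
    assert solution(50) == 538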
| 489
| 0
|
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
A__ : Optional[int] = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 353
|
"""simple docstring"""
def stooge_sort(arr):
    """Sort `arr` in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    """Recursively stooge-sort the slice arr[i..h]."""
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)
if __name__ == "__main__":
A__ : Dict = input('Enter numbers separated by a comma:\n').strip()
A__ : Optional[Any] = [int(item) for item in user_input.split(',')]
print(stooge_sort(unsorted))
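# Usage sketch: stooge sort is a deliberately inefficient teaching algorithm
# running in O(n^(log 3 / log 1.5)) ~ O(n^2.7), so keep inputs small.
def _demo_stooge_sort():
    assert stooge_sort([18, 5, -3, 0, 5]) == [-3, 0, 5, 5, 18]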
| 353
| 1
|
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    """Return all rotations of the string `s`."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> BWTTransformDict:
    """Compute the Burrows-Wheeler transform of `s`."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Reverse a Burrows-Wheeler transform given the index of the original string."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string type must be int or castable to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
_A : Tuple = 'Provide a string that I will generate its BWT transform: '
_A : List[Any] = input(entry_msg).strip()
_A : List[Any] = bwt_transform(s)
print(
F'''Burrows Wheeler transform for string \'{s}\' results '''
F'''in \'{result['bwt_string']}\''''
)
_A : Optional[Any] = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
F'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
F'''we get original string \'{original_string}\''''
)
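# A round-trip sketch for the transforms above; "^BANANA" is the classic
# textbook example, but any non-empty string works.
def _demo_bwt_round_trip():
    result = bwt_transform("^BANANA")
    assert reverse_bwt(result["bwt_string"], result["idx_original_string"]) == "^BANANA"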
| 710
|
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Try to solve `maze`, printing the solution path if one exists."""
    size = len(maze)
    # We need to create a solution object to save the path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Depth-first search from (i, j) towards the bottom-right corner."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark as visited
            solutions[i][j] = 1

            # check all four directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
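# Usage sketch on a tiny maze: 0 is an open cell, 1 is a wall; the solver walks
# from the top-left to the bottom-right corner and prints the path it found.
def _demo_solve_maze():
    maze = [
        [0, 1, 0],
        [0, 0, 0],
        [1, 1, 0],
    ]
    assert solve_maze(maze) is True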
| 130
| 0
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _lowercase ( self , UpperCamelCase__=False , UpperCamelCase__=False ) -> Optional[int]:
def _flatten(UpperCamelCase__ ):
return list(itertools.chain(*UpperCamelCase__ ) )
if equal_length:
lowerCamelCase : Any = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCamelCase : str = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCamelCase : Tuple = [np.asarray(UpperCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase : Dict = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
lowerCamelCase : int = feat_extract_first.to_dict()
lowerCamelCase : Tuple = feat_extract_second.to_dict()
lowerCamelCase : str = feat_extract_first.mel_filters
lowerCamelCase : int = feat_extract_second.mel_filters
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self ) -> int:
lowerCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase : Optional[int] = os.path.join(UpperCamelCase__ , "feat_extract.json" )
feat_extract_first.to_json_file(UpperCamelCase__ )
lowerCamelCase : Dict = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
lowerCamelCase : Tuple = feat_extract_first.to_dict()
lowerCamelCase : Any = feat_extract_second.to_dict()
lowerCamelCase : Tuple = feat_extract_first.mel_filters
lowerCamelCase : int = feat_extract_second.mel_filters
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self ) -> Optional[Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase : str = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
lowerCamelCase : Tuple = feature_extractor(UpperCamelCase__ , padding="max_length" , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
lowerCamelCase : Union[str, Any] = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
lowerCamelCase : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# Test batched
lowerCamelCase : Dict = feature_extractor(UpperCamelCase__ , return_tensors="np" ).input_features
lowerCamelCase : str = feature_extractor(UpperCamelCase__ , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase : List[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCamelCase : Union[str, Any] = np.asarray(UpperCamelCase__ )
lowerCamelCase : Optional[int] = feature_extractor(UpperCamelCase__ , return_tensors="np" ).input_features
lowerCamelCase : int = feature_extractor(UpperCamelCase__ , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# Test truncation required
lowerCamelCase : List[str] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
lowerCamelCase : Union[str, Any] = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
lowerCamelCase : str = [x[: feature_extractor.n_samples] for x in speech_inputs]
lowerCamelCase : Dict = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs_truncated]
lowerCamelCase : Union[str, Any] = feature_extractor(UpperCamelCase__ , return_tensors="np" ).input_features
lowerCamelCase : Optional[int] = feature_extractor(UpperCamelCase__ , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
def _lowercase ( self ) -> List[Any]:
# fmt: off
lowerCamelCase : Dict = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
lowerCamelCase : Optional[Any] = self._load_datasamples(1 )
lowerCamelCase : Optional[int] = WhisperFeatureExtractor()
lowerCamelCase : List[str] = feature_extractor(UpperCamelCase__ , return_tensors="pt" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , UpperCamelCase__ , atol=1e-4 ) )
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
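# A minimal sketch of the feature extractor outside the test harness, assuming
# the speech dependencies are installed; one second of synthetic silence stands
# in for the LibriSpeech samples loaded above.
def _demo_whisper_features():
    feature_extractor = WhisperFeatureExtractor()
    audio = np.zeros(16_000, dtype=np.float32)  # 1 s at the default 16 kHz rate
    features = feature_extractor(audio, sampling_rate=16_000, return_tensors="np")
    # 80 mel bins over 3000 frames (audio is padded to 30 s), as the tests check
    assert features.input_features.shape == (1, 80, 3000)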
| 311
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Optional[int] = {
'google/vit-base-patch16-224': 'https://huggingface.co/vit-base-patch16-224/resolve/main/config.json',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
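# A small sketch relating the fields above to the transformer's sequence
# length: a 224x224 image cut into 16x16 patches gives 196 patch tokens,
# plus one [CLS] token.
def _demo_vit_sequence_length():
    config = ViTConfig()
    num_patches = (config.image_size // config.patch_size) ** 2
    assert num_patches == 196
    assert num_patches + 1 == 197  # + [CLS]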
| 311
| 1
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoints
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 700
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results
if __name__ == "__main__":
    main()
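# Example invocation (a minimal sketch; the CSV files are hypothetical placeholders and
# must contain the label column plus one or two text columns, as get_tfds expects):
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file ./train.csv --dev_file ./dev.csv --test_file ./test.csv \
#       --label_column_id 0 \
#       --output_dir ./model --do_train --do_eval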
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic


def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
f"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
f"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


# We verify our conversion on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1e-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1e-4 )
print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
# Upload model and image processor to the hub
logger.info("Uploading PyTorch model and image processor to the hub..." )
model.push_to_hub(f"""nielsr/{model_name}""" )
processor.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="detr-resnet-50",
        type=str,
        choices=["detr-resnet-50", "detr-resnet-101"],
        help="Name of the DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
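# Example invocation (a minimal sketch; the output folder is a hypothetical placeholder):
#
#   python convert_detr_to_pytorch.py \
#       --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50-hf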
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    """Wraps a ViT image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forwards all arguments to the tokenizer's `batch_decode`.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwards all arguments to the tokenizer's `decode`.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
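# A minimal usage sketch (kept in a comment so importing this module stays side-effect
# free; the checkpoint name is an illustrative assumption, not something this file pins down):
#
#   from PIL import Image
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a cat"], images=Image.open("cat.png"), return_tensors="pt")
#   # inputs now holds both input_ids (from the tokenizer) and pixel_values (from the image processor)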
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative when `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight so the network's output approaches `expected`."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
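    # Note (added): with enough propagations the output converges toward the expected
    # value, e.g. forward_propagation(32, 450_000) should land close to 32. The weight
    # is randomly initialised, so allow a small tolerance rather than an exact match.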
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(self, vocab_size=50_267, entity_vocab_size=500_000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        """Constructs LukeConfig."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
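# A minimal instantiation sketch (kept in a comment so importing this module stays
# side-effect free; the defaults above mirror the base-sized LUKE configuration):
#
#   config = LukeConfig()
#   assert config.entity_emb_size == 256 and config.use_entity_aware_attention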
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
_DESCRIPTION = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
_KWARGS_DESCRIPTION = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        references: List[List[List[str]]],
        predictions: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def solution(pence: int = 200) -> int:
    """Returns the number of different ways to make `pence` pence using British coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
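    # An added, hand-checkable sanity case: 5 pence can be made as {5}, {2,2,1},
    # {2,1,1,1} and {1,1,1,1,1}, i.e. 4 ways.
    assert solution(5) == 4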
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
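# Example invocation (a minimal sketch; the checkpoint and output paths are
# hypothetical placeholders):
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path ./models/117M/model.ckpt \
#       --pytorch_dump_folder_path ./gpt2-pt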
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(self, prediction_length: Optional[int] = None, context_length: Optional[int] = None, distribution_output: str = "student_t", loss: str = "nll", input_size: int = 1, lags_sequence: List[int] = None, scaling: Optional[Union[str, bool]] = "mean", num_dynamic_real_features: int = 0, num_static_real_features: int = 0, num_static_categorical_features: int = 0, num_time_features: int = 0, cardinality: Optional[List[int]] = None, embedding_dimension: Optional[List[int]] = None, d_model: int = 64, encoder_ffn_dim: int = 32, decoder_ffn_dim: int = 32, encoder_attention_heads: int = 2, decoder_attention_heads: int = 2, encoder_layers: int = 2, decoder_layers: int = 2, is_encoder_decoder: bool = True, activation_function: str = "gelu", dropout: float = 0.05, encoder_layerdrop: float = 0.1, decoder_layerdrop: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, num_parallel_samples: int = 100, init_std: float = 0.02, use_cache=True, attention_type: str = "prob", sampling_factor: int = 5, distil: bool = True, **kwargs):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
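# A minimal instantiation sketch (kept in a comment so the module stays side-effect
# free; the horizon/context numbers below are illustrative, not from a released checkpoint):
#
#   config = InformerConfig(prediction_length=24, context_length=48, input_size=1)
#   # context_length falls back to prediction_length when left unset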
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
    print(solution())
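    # Added sanity check: the product of the four digit-cancelling fractions
    # (16/64, 19/95, 26/65, 49/98) reduces to 1/100, the known Project Euler 33 answer.
    assert solution() == 100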
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)


def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )
        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)


def write_tokenizer(tokenizer_path, input_tokenizer_path):
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
    main()
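# Example invocation (a minimal sketch; --input_dir must contain tokenizer.model and
# the size-named weight folder, and both paths below are hypothetical placeholders):
#
#   python convert_llama_weights_to_hf.py \
#       --input_dir ./llama_downloads --model_size 7B --output_dir ./llama-7b-hf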
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="bottleneck", hidden_act="relu", downsample_in_first_stage=False, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
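# A minimal sketch of the two classes above working together (kept in a comment so the
# module stays side-effect free; the axis names and 1e-3 atol come from the OnnxConfig):
#
#   config = ResNetConfig()                 # bottleneck layout, depths [3, 4, 6, 3]
#   onnx_config = ResNetOnnxConfig(config)
#   assert "pixel_values" in onnx_config.inputs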
import inspect
import unittest
from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester( ConfigTester ):
    """simple docstring"""
    def create_and_test_config_common_properties( self ) -> None:
        """simple docstring"""
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , "width_multiplier" ) )
class MobileViTVaModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=64 , patch_size=2 , num_channels=3 , hidden_act="swish" , conv_kernel_size=3 , output_stride=32 , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , width_multiplier=0.25 , ffn_dropout=0.0 , attn_dropout=0.0 , ) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier , divisor=8 )
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout
    def prepare_config_and_inputs( self ) -> tuple:
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self ) -> MobileViTVaConfig:
        """simple docstring"""
        return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ) -> None:
        """simple docstring"""
        model = MobileViTVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ) -> None:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ) -> None:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        result = model(pixel_values , labels=pixel_labels )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    def prepare_config_and_inputs_for_common( self ) -> tuple:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ) -> None:
        """simple docstring"""
        self.model_tester = MobileViTVaModelTester(self )
        self.config_tester = MobileViTVaConfigTester(self , config_class=MobileViTVaConfig , has_text_modality=False )
    def test_config( self ) -> None:
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
    def test_inputs_embeds( self ) -> None:
        """simple docstring"""
        pass
    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
    def test_model_common_attributes( self ) -> None:
        """simple docstring"""
        pass
    @unittest.skip(reason="MobileViTV2 does not output attentions" )
    def test_attention_outputs( self ) -> None:
        """simple docstring"""
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
    def test_multi_gpu_data_parallel_forward( self ) -> None:
        """simple docstring"""
        pass
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def test_model_is_small( self ) -> None:
        """simple docstring"""
        pass
    def test_forward_signature( self ) -> None:
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ) -> None:
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states ) , expected_num_stages )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_for_semantic_segmentation( self ) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ) -> None:
        """simple docstring"""
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head( self ) -> None:
        """simple docstring"""
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_logits , atol=1e-4 ) )
    @slow
    def test_inference_semantic_segmentation( self ) -> None:
        """simple docstring"""
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-4 ) )
    @slow
    def test_post_processing_semantic_segmentation( self ) -> None:
        """simple docstring"""
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(50, 60)] )
        expected_shape = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , expected_shape )
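# Note (added for reference): with the usual transformers checkout layout, the
# suites above are collected by pytest, e.g.
#     python -m pytest tests/models/mobilevitv2 -k "test_model"
# Slow integration tests additionally require RUN_SLOW=1 and network access.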
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class UpperCAmelCase__ :
'''simple docstring'''
pass
from math import sqrt
def is_prime(number: int) -> bool:
    '''simple docstring'''
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    status = True
    # 0 and 1 are not primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to False and break out of the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must be of type bool"
    return status
def sieve_er(n: int) -> list:
    '''simple docstring'''
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"
    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returned.
    # actual sieve of Eratosthenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"
    return ans
def get_prime_numbers(n: int) -> list:
    '''simple docstring'''
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends it to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"
    return ans
def prime_factorization(number: int) -> list:
    '''simple docstring'''
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"
    ans = []  # this list will be returned by the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' is not prime then build the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"
    return ans
def greatest_prime_factor(number: int) -> int:
    '''simple docstring'''
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be of type int"
    return ans
def smallest_prime_factor(number: int) -> int:
    '''simple docstring'''
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be of type int"
    return ans
def is_even(number: int) -> bool:
    '''simple docstring'''
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be of type bool"
    return number % 2 == 0
def is_odd(number: int) -> bool:
    '''simple docstring'''
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be of type bool"
    return number % 2 != 0
def goldbach(number: int) -> list:
    '''simple docstring'''
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"
    ans = []  # this list will be returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variables for the while-loops.
    i = 0
    j = None
    # exit variable, used to break out of the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes, and their sum must equal 'number'"
    return ans
def gcd(number1: int, number2: int) -> int:
    '''simple docstring'''
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be of type int and positive"
    return number1
def kg_v(number1: int, number2: int) -> int:
    '''simple docstring'''
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."
    ans = 1  # actual answer that will be returned.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captures numbers in both 'prime_fac_1' and 'prime_fac_2'
    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be of type int and positive"
    return ans
def get_prime(n: int) -> int:
    '''simple docstring'''
    assert isinstance(n, int) and (n >= 0), "'n' must be a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans is not prime then
        # run on to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and of type int"
    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    '''simple docstring'''
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'p_number_1' < 'p_number_2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"
    # 'ans' contains neither 'p_number_1' nor 'p_number_2'!
    return ans
def get_divisors(n: int) -> list:
    '''simple docstring'''
    assert isinstance(n, int) and (n >= 1), "'n' must be an int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"
    return ans
def is_perfect_number(number: int) -> bool:
    '''simple docstring'''
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and > 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"
    # sums all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple:
    '''simple docstring'''
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be of type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int) -> int:
    '''simple docstring'''
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    ans = 1  # this will be returned.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def fib(n: int) -> int:
    '''simple docstring'''
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
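if __name__ == "__main__":
    # Hand-checked smoke test of the helpers above (added for illustration; the
    # expected values are standard number-theory facts, not from the original file):
    assert is_prime(97) and not is_prime(1)
    assert prime_factorization(360) == [2, 2, 2, 3, 3, 5]
    assert gcd(54, 24) == 6
    assert kg_v(12, 18) == 36
    assert goldbach(28) == [5, 23]
    assert fib(10) == 89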
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    '''simple docstring'''
    return "".join(sorted(word))
def anagram(my_word: str) -> list[str]:
    '''simple docstring'''
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open('anagrams.txt', 'w') as file:
        file.write('all_anagrams = \n ')
        file.write(pprint.pformat(all_anagrams))
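# Illustrative behaviour of the helpers above (added; actual results depend on
# the bundled words.txt, so these values are indicative only):
#     signature("dormitory")  # -> "dimoorrty"
#     anagram("post")         # -> e.g. ["opts", "post", "pots", "spot", "stop", "tops"]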
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests( unittest.TestCase ):
    @require_torch
    def test_small_model_pt( self ):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
        dataset = load_dataset("ashraq/esc50" )
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
        self.assertEqual(
            nested_simplify(output ) , [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}] , )
    @unittest.skip("No models are available in TF" )
    def test_small_model_tf( self ):
        pass
    @slow
    @require_torch
    def test_large_model_pt( self ):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50" )
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
        self.assertEqual(
            nested_simplify(output ) , [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ] , )
        output = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5 , )
        output = audio_classifier(
            [audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5 , )
    @unittest.skip("No models are available in TF" )
    def test_large_model_tf( self ):
        pass
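# A minimal standalone sketch of the same pipeline call pattern (added; assumes
# network access to the Hub, model name copied from the fast test above):
#
#     classifier = pipeline(task="zero-shot-audio-classification",
#                           model="hf-internal-testing/tiny-clap-htsat-unfused")
#     scores = classifier(audio_array,
#                         candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])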
from manim import *
class ModelMemoryFlow( Scene ):  # scene name is a placeholder; manim requires the entry point to be `construct`
    '''simple docstring'''
    def construct( self ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE__ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE__ : List[Any] = Rectangle(height=0.25 , width=0.25 )
SCREAMING_SNAKE_CASE__ : Tuple = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : Any = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : str = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : Any = Text('''CPU''' , font_size=24 )
SCREAMING_SNAKE_CASE__ : List[str] = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE__ : int = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : Tuple = Text('''GPU''' , font_size=24 )
SCREAMING_SNAKE_CASE__ : List[str] = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
gpu.move_to([-1, -1, 0] )
self.add(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : Dict = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : int = Text('''Model''' , font_size=24 )
SCREAMING_SNAKE_CASE__ : Tuple = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
model.move_to([3, -1.0, 0] )
self.add(_lowercase )
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for i, rect in enumerate(_lowercase ):
SCREAMING_SNAKE_CASE__ : Any = fill.copy().set_fill(_lowercase , opacity=0.8 )
target.move_to(_lowercase )
model_arr.append(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowercase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_lowercase )
self.add(*_lowercase , *_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : int = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : Optional[int] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = Text('''Disk''' , font_size=24 )
SCREAMING_SNAKE_CASE__ : Any = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
disk.move_to([-4, -1.25, 0] )
self.add(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE__ : Any = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(_lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Dict = Square(0.3 )
input.set_fill(_lowercase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _lowercase , buff=0.5 )
self.play(Write(_lowercase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_lowercase , buff=0.02 )
self.play(MoveToTarget(_lowercase ) )
self.play(FadeOut(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Arrow(start=_lowercase , end=_lowercase , color=_lowercase , buff=0.5 )
a.next_to(model_arr[0].get_left() , _lowercase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
SCREAMING_SNAKE_CASE__ : Any = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowercase , run_time=3 ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02}
self.play(
Write(_lowercase ) , Circumscribe(model_arr[0] , color=_lowercase , **_lowercase ) , Circumscribe(model_cpu_arr[0] , color=_lowercase , **_lowercase ) , Circumscribe(gpu_rect[0] , color=_lowercase , **_lowercase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
SCREAMING_SNAKE_CASE__ : Any = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , _lowercase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
SCREAMING_SNAKE_CASE__ : List[str] = AnimationGroup(
FadeOut(_lowercase , run_time=0.5 ) , MoveToTarget(_lowercase , run_time=0.5 ) , FadeIn(_lowercase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_lowercase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
SCREAMING_SNAKE_CASE__ : Any = 0.7
self.play(
Circumscribe(model_arr[i] , **_lowercase ) , Circumscribe(cpu_left_col_base[i] , **_lowercase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_lowercase , **_lowercase ) , Circumscribe(gpu_rect[0] , color=_lowercase , **_lowercase ) , Circumscribe(model_arr[i + 1] , color=_lowercase , **_lowercase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_lowercase , **_lowercase ) , Circumscribe(cpu_left_col_base[-1] , color=_lowercase , **_lowercase ) , Circumscribe(gpu_rect[0] , color=_lowercase , **_lowercase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
SCREAMING_SNAKE_CASE__ : List[str] = a_c
SCREAMING_SNAKE_CASE__ : int = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(_lowercase ) , FadeOut(_lowercase , run_time=0.5 ) , )
SCREAMING_SNAKE_CASE__ : str = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowercase , run_time=3 ) , MoveToTarget(_lowercase ) )
self.wait()
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
a_ :Tuple = logging.get_logger(__name__)
def create_rename_keys(config , base_model=False ) -> list:
    '''simple docstring'''
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False ) -> None:
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict ) -> None:
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ) -> None:
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name , pytorch_dump_folder_path , push_to_hub=False ) -> None:
    '''simple docstring'''
    backbone_config = BitConfig(
        global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=384 , num_labels=1000 )
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print("Predicted class:" , logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1e-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"""Saving processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f"""Pushing model and processor to the hub {vit_name}""" )
        model.push_to_hub(f"""ybelkada/{vit_name}""" )
        processor.push_to_hub(f"""ybelkada/{vit_name}""" )
if __name__ == "__main__":
a_ :str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
a_ :Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
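# Typical invocation (added; the script filename and output path are placeholders):
#   python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base --push_to_hub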
"""simple docstring"""
import math
def insertion_sort(array: list , start: int = 0 , end: int = 0 ) -> list:
    """simple docstring"""
    end = end or len(array )
    for i in range(start , end ):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array: list , index: int , heap_size: int ) -> None:  # Max Heap
    """simple docstring"""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array , largest , heap_size )
def heap_sort(array: list ) -> list:
    """simple docstring"""
    n = len(array )
    for i in range(n // 2 , -1 , -1 ):
        heapify(array , i , n )
    for i in range(n - 1 , 0 , -1 ):
        array[i], array[0] = array[0], array[i]
        heapify(array , 0 , i )
    return array
def median_of_3(array: list , first_index: int , middle_index: int , last_index: int ) -> int:
    """simple docstring"""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array: list , low: int , high: int , pivot: int ) -> int:
    """simple docstring"""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array: list ) -> list:
    """simple docstring"""
    if len(array ) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array ) ) )
    size_threshold = 16
    return intro_sort(array , 0 , len(array ) , size_threshold , max_depth )
def intro_sort(array: list , start: int , end: int , size_threshold: int , max_depth: int ) -> list:
    """simple docstring"""
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array )
        max_depth -= 1
        pivot = median_of_3(array , start , start + ((end - start) // 2) + 1 , end - 1 )
        p = partition(array , start , end , pivot )
        intro_sort(array , p , end , size_threshold , max_depth )
        end = p
    return insertion_sort(array , start , end )
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = input("""Enter numbers separated by a comma : """).strip()
__A = [float(item) for item in user_input.split(""",""")]
print(sort(unsorted))
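    # A tiny non-interactive sanity check (added for illustration):
    assert sort([3, 1, 2]) == [1, 2, 3]
    assert sort([10, -5, 0, 7]) == [-5, 0, 7, 10]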
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset , expected_features ):
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_dataset_from_sql_keep_in_memory(keep_in_memory , sqlite_path , tmp_path ):
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            """dataset""" , """sqlite:///""" + sqlite_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_sql_dataset(dataset , expected_features )
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_dataset_from_sql_features(features , sqlite_path , tmp_path ):
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=features , cache_dir=cache_dir ).read()
    _check_sql_dataset(dataset , expected_features )
def iter_sql_file(sqlite_path ):
    with contextlib.closing(sqlite3.connect(sqlite_path ) ) as con:
        cur = con.cursor()
        cur.execute("""SELECT * FROM dataset""" )
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path , tmp_path ):
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir , """tmp.sql""" )
    dataset = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=cache_dir ).read()
    SqlDatasetWriter(dataset , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write()
    original_sql = iter_sql_file(sqlite_path )
    expected_sql = iter_sql_file(output_sqlite_path )
    for row1, row2 in zip(original_sql , expected_sql ):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path , tmp_path ):
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir , """tmp.sql""" )
    dataset = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=cache_dir ).read()
    SqlDatasetWriter(dataset , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write()
    original_sql = iter_sql_file(sqlite_path )
    expected_sql = iter_sql_file(output_sqlite_path )
    for row1, row2 in zip(original_sql , expected_sql ):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path , tmp_path ):
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir , """tmp.sql""" )
    dataset = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=cache_dir ).read()
    with pytest.raises(ValueError ):
        SqlDatasetWriter(dataset , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
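# For reference, a minimal round trip with the same I/O classes (a sketch, not a
# test; the in-memory dataset and the "data.sqlite" path are made up):
#
#     ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
#     SqlDatasetWriter(ds, "dataset", "sqlite:///data.sqlite", num_proc=1).write()
#     ds2 = SqlDatasetReader("dataset", "sqlite:///data.sqlite").read()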
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class TimesformerConfig( PretrainedConfig ):
    model_type = '''timesformer'''
    def __init__( self , image_size=224 , patch_size=16 , num_channels=3 , num_frames=8 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-6 , qkv_bias=True , attention_type="divided_space_time" , drop_path_rate=0 , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
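# Illustrative instantiation (added; values follow the defaults defined above):
#     config = TimesformerConfig(num_frames=16)
#     config.num_frames      # 16
#     config.hidden_size     # 768
#     config.attention_type  # "divided_space_time"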
def add(first: int , second: int ) -> int:
    '''simple docstring'''
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"""{add(first, second) = }""")
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list , ndigits: int = 3 ) -> list:
    """simple docstring"""
    x_min = min(data )
    x_max = max(data )
    # normalize data
    return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]
def standardization(data: list , ndigits: int = 3 ) -> list:
    """simple docstring"""
    mu = mean(data )
    sigma = stdev(data )
    # standardize data
    return [round((x - mu) / (sigma) , ndigits ) for x in data]
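if __name__ == "__main__":
    # Hand-checked illustration (added; not part of the original module):
    print(normalization([2.0, 4.0, 6.0]))    # [0.0, 0.5, 1.0]
    print(standardization([2.0, 4.0, 6.0]))  # [-1.0, 0.0, 1.0]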
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
class GLPNFeatureExtractor( GLPNImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use GLPNImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
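# Instantiating the shim above simply emits the deprecation warning and then
# behaves exactly like GLPNImageProcessor, e.g.:
#     extractor = GLPNFeatureExtractor()  # FutureWarning, then normal processing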
"""simple docstring"""
def solution(pence: int = 200) -> int:
    """Count the ways `pence` can be made from the standard British coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
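

# --- Added worked example (illustrative): the four ways to make 5 pence are
# 1+1+1+1+1, 1+1+1+2, 1+2+2 and 5.
assert solution(5) == 4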
if __name__ == "__main__":
assert solution(2_0_0) == 7_3_6_8_2
| 491
|
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_states = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_states)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}

if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
| 491
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    r"""
    Constructs a PIX2STRUCT processor which wraps a T5 tokenizer and a Pix2Struct image processor into a single
    processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 93
|
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to add the value to the bucket. Return True if successful."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
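

# --- Added usage sketch (illustrative) ---
if __name__ == "__main__":
    hm = HashMap()
    hm["answer"] = 42
    assert hm["answer"] == 42 and len(hm) == 1
    del hm["answer"]
    assert "answer" not in hm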
| 653
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 186
|
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname: str, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
SCREAMING_SNAKE_CASE = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
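
# --- Added usage note (illustrative; the script path is hypothetical) ---
# Fix files in place:   python utils/sort_auto_mappings.py
# Check-only (CI mode): python utils/sort_auto_mappings.py --check_only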
| 186
| 1
|
"""simple docstring"""
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes <= n."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    # sieve the first segment [2, sqrt(n)] directly
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    # sieve the remaining range segment by segment, using the primes found above
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)
    return prime
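

# --- Added sanity check (illustrative) ---
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]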
print(sieve(10**6))
| 46
|
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))
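

# --- Added examples (illustrative) ---
assert is_sri_lankan_phone_number("0094702343221")
assert not is_sri_lankan_phone_number("1234567890")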
if __name__ == "__main__":
    phone = "0094702343221"
print(is_sri_lankan_phone_number(phone))
| 431
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 707
|
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map))

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
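

# --- Added usage sketch (illustrative; shows how a projection head feeds a
# distribution, using only the classes defined above) ---
# output = StudentTOutput(dim=1)
# proj = output.get_parameter_projection(in_features=32)  # maps hidden states to (df, loc, scale)
# df, loc, scale = proj(torch.randn(8, 32))
# distr = output.distribution((df, loc, scale))           # batch of 8 Student-T distributions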
| 558
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    r"""
    Construct a PEGASUS tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS."""
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Get a list where entries are [1] if a token is [eos] or [pad] else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 585
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 585
| 1
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
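

# --- Added usage sketch (illustrative; the checkpoint name is hypothetical) ---
# processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
# inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")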
| 717
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 0
| 0
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict


@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
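
# --- Added usage note (illustrative; the script file name is hypothetical) ---
# python convert_jukebox.py --model_name jukebox-1b-lyrics --pytorch_dump_folder_path ./jukebox-1b-lyrics-converted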
| 154
|
from math import isqrt
def is_prime(number: int) -> bool:
    """Return True if `number` has no divisor in [2, sqrt(number)]."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below `max_prime` that are differences of consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count
if __name__ == "__main__":
print(f"{solution() = }")
| 154
| 1
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase=8_2 ,_lowerCAmelCase=1_6 ):
'''simple docstring'''
set_seed(4_2 )
A_ : List[Any] = RegressionModel()
A_ : int = deepcopy(_lowerCAmelCase )
A_ : List[str] = RegressionDataset(length=_lowerCAmelCase )
A_ : str = DataLoader(_lowerCAmelCase ,batch_size=_lowerCAmelCase )
model.to(accelerator.device )
A_ , A_ : str = accelerator.prepare(_lowerCAmelCase ,_lowerCAmelCase )
return model, ddp_model, dataloader
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase=False ):
'''simple docstring'''
A_ : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
A_ : List[str] = load_dataset("""glue""" ,"""mrpc""" ,split="""validation""" )
def tokenize_function(_lowerCAmelCase ):
A_ : Tuple = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=_lowerCAmelCase ,max_length=_lowerCAmelCase )
return outputs
with accelerator.main_process_first():
A_ : List[Any] = dataset.map(
_lowerCAmelCase ,batched=_lowerCAmelCase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
A_ : int = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(_lowerCAmelCase ):
if use_longest:
return tokenizer.pad(_lowerCAmelCase ,padding="""longest""" ,return_tensors="""pt""" )
return tokenizer.pad(_lowerCAmelCase ,padding="""max_length""" ,max_length=1_2_8 ,return_tensors="""pt""" )
return DataLoader(_lowerCAmelCase ,shuffle=_lowerCAmelCase ,collate_fn=_lowerCAmelCase ,batch_size=1_6 )
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ):
'''simple docstring'''
A_ : Optional[int] = Accelerator(dispatch_batches=_lowerCAmelCase ,split_batches=_lowerCAmelCase )
A_ : Union[str, Any] = get_dataloader(_lowerCAmelCase ,not dispatch_batches )
A_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(
"""hf-internal-testing/mrpc-bert-base-cased""" ,return_dict=_lowerCAmelCase )
A_ , A_ : Dict = accelerator.prepare(_lowerCAmelCase ,_lowerCAmelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        inputs, target = batch.values()
        with torch.no_grad():
            logit = model(inputs)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    _, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
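# Added note (my addition, not part of the original script): gather_for_metrics
# behaves like accelerator.gather but drops the duplicate samples that Accelerate
# pads onto the final batch so every process receives equal work. That is why
# test_torch_metrics expects exactly num_samples rows even when num_samples is
# not divisible by batch_size * num_processes (e.g. 82 samples, batch size 16).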
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        rounded_slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in rounded_slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    # Overwrite the default test_latents_input: InstructPix2Pix encodes the image
    # with vae.encode(...).latent_dist.mode() instead of sampling.
    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
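# Added usage sketch (my addition, not part of the test suite): the minimal
# inference path these tests exercise, using the same checkpoint the slow tests
# load. Kept as a comment because it downloads weights and needs a GPU.
#
#   import torch
#   from diffusers import StableDiffusionInstructPix2PixPipeline
#   from diffusers.utils import load_image
#
#   pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
#       "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
#   ).to("cuda")
#   image = load_image(
#       "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
#   )
#   edited = pipe(
#       "turn him into a cyborg", image=image, num_inference_steps=10, image_guidance_scale=1.0
#   ).images[0]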
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)
        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
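# Added usage sketch (my addition, not part of the test file): the one-call
# workflow the tests above verify. Kept as a comment because it downloads the
# DETR checkpoint.
#
#   from transformers import pipeline
#
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.99)
#   # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]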
def catalan_number(number: int) -> int:
    """
    Return the ``number``-th Catalan number (1-indexed), using the recurrence
    C(n) = C(n - 1) * (4n - 2) / (n + 1).

    >>> catalan_number(1)
    1
    >>> catalan_number(5)
    14
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
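# Added cross-check (my addition): the recurrence above is equivalent to the
# closed form C(n) = binom(2n, n) / (n + 1), shifted for this function's
# 1-based indexing.
from math import comb


def _catalan_via_binomial(number: int) -> int:
    return comb(2 * (number - 1), number - 1) // number


assert all(catalan_number(n) == _catalan_via_binomial(n) for n in range(1, 10))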
def bfs(graph, s, t, parent):
    """Breadth-first search: return True if there is a path from source ``s`` to sink ``t``."""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink):
    """Edmonds-Karp variant: augment along the shortest path found by BFS."""
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
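# Added check (my addition): the capacity matrix above is the classic CLRS
# flow-network example, whose maximum flow from node 0 to node 5 is 23.
# ford_fulkerson mutates its argument into the residual graph, so verify on a
# fresh copy of the capacities:
assert (
    ford_fulkerson(
        [
            [0, 16, 13, 0, 0, 0],
            [0, 0, 10, 12, 0, 0],
            [0, 4, 0, 0, 14, 0],
            [0, 0, 9, 0, 0, 20],
            [0, 0, 0, 7, 0, 4],
            [0, 0, 0, 0, 0, 0],
        ],
        source,
        sink,
    )
    == 23
)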
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,              # not supported for now
    # '': get_constant_schedule_with_warmup,  # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(root_dir).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether new added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir",
        default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfuly executed!")
    return trainer
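# Added sketch (my addition): the minimal contract a task module must fulfil.
# BaseTransformer leaves get_dataloader abstract; all names below are
# illustrative only.
#
#   class MyTaskTransformer(BaseTransformer):
#       def __init__(self, hparams):
#           super().__init__(hparams, num_labels=2, mode="sequence-classification")
#
#       def training_step(self, batch, batch_idx):
#           ...  # compute and return the loss
#
#       def get_dataloader(self, type_path, batch_size, shuffle=False):
#           dataset = ...  # build a torch Dataset for "train" / "dev" / "test"
#           return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle,
#                             num_workers=self.hparams.num_workers)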
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the
        # arguments `add_prefix_space` and `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
| 101
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=50_400,
        n_positions=2_048,
        n_embd=4_096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
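

if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module; requires the
    # `transformers` package to be installed): build the ONNX config with a
    # past-key-values cache and inspect the dynamic axes it declares.
    demo_config = GPTJConfig()
    demo_onnx_config = GPTJOnnxConfig(demo_config, use_past=True)
    print(demo_onnx_config.inputs)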
| 291
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 393
|
"""simple docstring"""
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise TypeError("""Input value must be an 'int' type""" )
lowerCAmelCase = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
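
    # Hedged cross-check (not part of the original module): for non-negative
    # integers the shift loop above computes exactly Python's int.bit_length().
    for sample in (0, 1, 4, 25, 1 << 20):
        assert get_highest_set_bit_position(sample) == sample.bit_length()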
| 393
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 390
|
from math import factorial
def solution(num: int = 100) -> int:
    """Project Euler Problem 20: return the sum of the digits in num!."""
    return sum(int(digit) for digit in str(factorial(num)))
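

# Hedged sanity check (not part of the original solution): 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
assert solution(10) == 27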
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 593
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700
|
from copy import deepcopy
class FenwickTree:
    """Fenwick tree (binary indexed tree) supporting point updates, prefix sums and rank queries."""

    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        """Build the tree in O(n) from an existing array."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        """Recover the underlying array from the tree."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add `value` to the element at `index`."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set the element at `index` to `value`."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Return the sum of the elements in [0, right)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Return the sum of the elements in [left, right)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Return the largest index i such that prefix(i + 1) <= value, or -1."""
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
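
    # Hedged usage sketch (not part of the original module): point updates and
    # range queries over a small array.
    demo_tree = FenwickTree(arr=[1, 2, 3, 4, 5])
    assert demo_tree.prefix(3) == 1 + 2 + 3    # sum of arr[0:3]
    assert demo_tree.query(1, 4) == 2 + 3 + 4  # sum of arr[1:4]
    demo_tree.add(2, 10)                       # arr[2] += 10
    assert demo_tree.query(1, 4) == 2 + 13 + 4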
| 151
| 0
|
def excel_title_to_column(column_title: str) -> int:
    """
    Given an uppercase Excel column title, return its column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("B")
    2
    >>> excel_title_to_column("AB")
    28
    >>> excel_title_to_column("Z")
    26
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
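
    # Hedged example (not part of the original module): "ZZ" is the last
    # two-letter column, 26 * 26 + 26 = 702.
    assert excel_title_to_column("ZZ") == 702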
| 66
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Resize an image so that its shortest edge equals `size["shortest_edge"]`, keeping the aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Center-crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Rescale pixel values by `scale`, typically 1/255."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize an image with the given per-channel mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        """Preprocess an image or a batch of images through the full transform chain."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
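

if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): run the full
    # preprocessing chain on one random RGB image; with the default
    # shortest_edge=224 resize and 224x224 center crop, the output batch
    # has shape (1, 3, 224, 224).
    demo_image = (np.random.rand(256, 320, 3) * 255).astype(np.uint8)
    demo_processor = CLIPImageProcessor()
    demo_batch = demo_processor(images=demo_image, return_tensors="np")
    print(demo_batch["pixel_values"].shape)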
| 464
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 420
|
"""
Project Euler Problem 174: count hollow square laminae (a square outline with a
centred square hole) by the number of tiles they use.
"""
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """Return the number of tile counts t <= t_limit that form between 1 and n_limit distinct laminae."""
    count = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 420
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 257
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for the Shap-E image-to-image pipeline, carrying the rendered frames."""

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(self, prior: PriorTransformer, image_encoder: CLIPVisionModel, image_processor: CLIPImageProcessor, scheduler: HeunDiscreteScheduler, renderer: ShapERenderer):
        super().__init__()

        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image: Union[PIL.Image.Image, List[PIL.Image.Image]], num_images_per_prompt: int = 1, num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, guidance_scale: float = 4.0, frame_size: int = 64, output_type: Optional[str] = "pil", return_dict: bool = True):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
| 277
| 0
|
"""
Project Euler Problem 26: find the value of d for which 1/d contains the
longest recurring cycle in its decimal fraction part.
"""


def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Return the d <= digit whose decimal expansion of numerator/d has the longest recurring cycle."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
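
    # Hedged check from the problem statement (not part of the original
    # solution): among d <= 10, 1/7 has the longest recurring cycle (6 digits).
    assert solution(1, 10) == 7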
| 713
|
'''simple docstring'''
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
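
    # Hedged check from the problem statement (not part of the original
    # solution): over a four-day period exactly 43 strings earn a prize.
    assert solution(4) == 43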
| 528
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 121
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 121
| 1
|
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a list of lists of floats."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="max_length", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=16, truncation=True, return_tensors="np", return_attention_mask=True
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        expected_input_features = np.array([
-1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
-1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
-1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
] )
# fmt: on
A : Optional[Any] = self._load_datasamples(1 )
A : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A : Optional[Any] = feature_extractor(lowerCamelCase__, return_tensors="""pt""" ).input_features
self.assertEquals(input_features.shape, (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30], lowerCamelCase__, atol=1e-4 ) )
| 711
|
import re
import string
import numpy as np
import datasets
SCREAMING_SNAKE_CASE_:int = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
SCREAMING_SNAKE_CASE_:Union[str, Any] = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
SCREAMING_SNAKE_CASE_:Union[str, Any] = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                } ), reference_urls=[], )
    def _compute(self, predictions, references, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False, ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)
        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 520
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)
UpperCAmelCase_ : int = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
},
"tokenizer_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
UpperCAmelCase_ : List[Any] = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs, ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
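    # A quick illustration of the two methods above (hypothetical ids, assuming the usual
    # ALBERT convention cls_token_id=2 and sep_token_id=3):
    #   build_inputs_with_special_tokens([5, 6])          -> [2, 5, 6, 3]
    #   build_inputs_with_special_tokens([5, 6], [7])     -> [2, 5, 6, 3, 7, 3]
    #   create_token_type_ids_from_sequences([5, 6], [7]) -> [0, 0, 0, 0, 1, 1]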
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 120
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self) -> None:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100, )
        return model
@property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        return CLIPTextModel(config)
def lowercase ( self ) -> Dict:
"""simple docstring"""
_UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.dummy_cond_unet_upscale
_UpperCamelCase = DDPMScheduler()
_UpperCamelCase = DDIMScheduler(prediction_type="v_prediction" )
_UpperCamelCase = self.dummy_vae
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
# make sure here that pndm scheduler skips prk
_UpperCamelCase = StableDiffusionUpscalePipeline(
unet=lowerCamelCase_ , low_res_scheduler=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , max_noise_level=3_50 , )
_UpperCamelCase = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_UpperCamelCase = "A painting of a squirrel eating a burger"
_UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_UpperCamelCase = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_UpperCamelCase = output.images
_UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_UpperCamelCase = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=lowerCamelCase_ , )[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
_UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
_UpperCamelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_UpperCamelCase = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase ( self ) -> Any:
"""simple docstring"""
_UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.dummy_cond_unet_upscale
_UpperCamelCase = DDPMScheduler()
_UpperCamelCase = DDIMScheduler(prediction_type="v_prediction" )
_UpperCamelCase = self.dummy_vae
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
# make sure here that pndm scheduler skips prk
_UpperCamelCase = StableDiffusionUpscalePipeline(
unet=lowerCamelCase_ , low_res_scheduler=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , max_noise_level=3_50 , )
_UpperCamelCase = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_UpperCamelCase = "A painting of a squirrel eating a burger"
_UpperCamelCase = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_UpperCamelCase = output.images
assert image.shape[0] == 2
_UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_UpperCamelCase = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_UpperCamelCase = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = self.dummy_cond_unet_upscale
_UpperCamelCase = DDPMScheduler()
_UpperCamelCase = DDIMScheduler(prediction_type="v_prediction" )
_UpperCamelCase = self.dummy_vae
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
# put models in fp16, except vae as it overflows in fp16
_UpperCamelCase = unet.half()
_UpperCamelCase = text_encoder.half()
# make sure here that pndm scheduler skips prk
_UpperCamelCase = StableDiffusionUpscalePipeline(
unet=lowerCamelCase_ , low_res_scheduler=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , max_noise_level=3_50 , )
_UpperCamelCase = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_UpperCamelCase = "A painting of a squirrel eating a burger"
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type="np" , ).images
_UpperCamelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self) -> None:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self ) -> Any:
"""simple docstring"""
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
_UpperCamelCase = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCamelCase = StableDiffusionUpscalePipeline.from_pretrained(lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
_UpperCamelCase = "a cat sitting on a park bench"
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type="np" , )
_UpperCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
_UpperCamelCase = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCamelCase = StableDiffusionUpscalePipeline.from_pretrained(
lowerCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
_UpperCamelCase = "a cat sitting on a park bench"
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type="np" , )
_UpperCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def lowercase ( self ) -> str:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCamelCase = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCamelCase = StableDiffusionUpscalePipeline.from_pretrained(
lowerCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = "a cat sitting on a park bench"
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=5 , output_type="np" , )
_UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 147
| 0
|
"""simple docstring"""
def reverse_words(input_str: str) -> str:
    """
    Reverses the order of words in a given string

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 529
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
a = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
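# A few illustrative inputs, matching how the regex is used below to strip framework
# prefixes word by word:
#   camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"]
#   camel_case_split("GPT2LMHead")  -> ["GPT2LM", "Head"]   # digits stay glued to the previous word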
def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules() -> str:
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers objects (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])
    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2
    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-align table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"
    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"), start_prompt="<!--This table is updated automatically from the auto modules", end_prompt="<!-- End table-->", )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 529
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/pegasus-xsum': 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, tokenizer_file=None, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, **kwargs, ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}" )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, pad_token=pad_token, eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}" )
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
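    # Illustrative layout produced by the methods above (hypothetical ids, assuming the usual
    # Pegasus convention pad=0, eos=1, unk=2):
    #   build_inputs_with_special_tokens([5, 6])      -> [5, 6, 1]     # single sequence + </s>
    #   build_inputs_with_special_tokens([5, 6], [7]) -> [5, 6, 7, 1]  # pairs are simply concatenated
    #   get_special_tokens_mask([5, 6])               -> [0, 0, 1]     # only the appended </s> is special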
| 357
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
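# Spot checks of the codepoint ranges above (plain Python, for illustration only):
#   _is_chinese_char(ord("中"))  -> True   (CJK Unified Ideographs, U+4E2D)
#   _is_chinese_char(ord("a"))   -> False
#   _is_chinese_char(ord("カ"))  -> False  (Katakana is deliberately excluded)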
def is_chinese(word):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
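# Illustration of the whole-word re-marking above (hypothetical tokens): given
# bert_tokens = ["帮", "助", "我"] and chinese_word_set = {"帮助"}, the longest match
# "帮助" is found at position 0, so the second character is re-marked as a subword:
#   add_sub_symbol(["帮", "助", "我"], {"帮助"}) -> ["帮", "##助", "我"]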
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
    args = parser.parse_args()
main(args)
| 357
| 1
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3], torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]), ) )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3], torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]), ) )
| 400
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""FunnelTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 400
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n    >>> pipe_prior.to("cuda")\n    >>> prompt = "red cat, 4k photo"\n    >>> out = pipe_prior(prompt)\n    >>> image_emb = out.image_embeds\n    >>> zero_image_emb = out.negative_image_embeds\n    >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n    >>> pipe.to("cuda")\n    >>> image = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=50,\n    ... ).images\n    >>> image[0].save("cat.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
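# A quick check of the rounding behaviour (values chosen for illustration): with the
# default scale_factor=8, heights are rounded up to a multiple of 64 and then divided by 8,
# i.e. the function returns the latent-space resolution fed to `prepare_latents` below:
#   downscale_height_and_width(512, 512) -> (64, 64)
#   downscale_height_and_width(520, 520) -> (72, 72)   # 520 % 64 != 0, so it rounds up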
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, ) -> None:
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds, negative_image_embeds, height=512, width=512, num_inference_steps=100, guidance_scale=4.0, num_images_per_prompt=1, generator=None, latents=None, output_type="pil", return_dict=True, ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, )
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 76
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"
    def __init__(self, vocab_size=30000, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1, hidden_act="gelu_new", hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout_prob=0.1, position_embedding_type="absolute", pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ])
| 370
| 0
|
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(timesteps, embedding_dim, min_timescale=1, max_timescale=1.0e4, freq_shift=1, flip_sin_to_cos=False, scale=1.0, ):
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift)
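# A minimal usage sketch of the helper above (hypothetical demo function; only shapes are
# asserted, since the values depend on the keyword arguments):
def _demo_sinusoidal_embeddings():
    ts = jnp.arange(4, dtype=jnp.float32)
    emb = get_sinusoidal_embeddings(ts, embedding_dim=8)
    # One row per timestep; first half sin, second half cos (flipped when flip_sin_to_cos=True).
    assert emb.shape == (4, 8)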
| 710
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[Any] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : int = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
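# Hedged illustration (not part of the original __init__): with the lazy module
# installed in sys.modules, heavy submodules are only imported on first attribute
# access, e.g.
#
#     from transformers.models.xlm_roberta import XLMRobertaConfig  # triggers the real import
#     config = XLMRobertaConfig()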
| 293
| 0
|
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    # Sort item indices by value-to-weight ratio, best first.
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            # Take the whole item.
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Take the largest fraction of the item that still fits, then stop.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
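    # Hedged usage sketch (illustrative numbers, not from the original file):
    # items 0 and 1 fit whole, then 2/3 of item 2 fills the remaining capacity,
    # so the expected result is (240.0, [1, 1, 0.666...]).
    print(fractional_knapsack([60, 100, 120], [10, 20, 30], 50))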
| 492
|
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
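# Hedged sanity check (not part of the original script): for a checkpoint-version-2.0
# layout with num_splits=3, num_heads=2 and hidden_size=4 per head, the reordering
# permutes rows but preserves the overall [num_splits * num_heads * hidden_size, :] shape.
def _demo_fix_ordering():
    dummy = torch.arange(3 * 2 * 4 * 8, dtype=torch.float32).view(3 * 2 * 4, 8)
    print(fix_query_key_value_ordering(dummy, 2.0, 3, 2, 4).shape)  # torch.Size([24, 8])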
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}
    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)
    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0
    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]
    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings
    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings
    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }
    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)
        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val
    # DEBUG.
    assert config.n_layer == layer_idx + 1
    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]
    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings
    # It should be done!
    return output_state_dict
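# Hedged illustration (not part of the original script): what the layer regex
# extracts from a Megatron parameter name.
def _demo_layer_regex():
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
    print(layer_re.match("layers.0.self_attention.dense.weight").groups())
    # ('0', 'self_attention.dense', 'weight')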
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()
    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")
    ds_args = input_state_dict.get("args", None)
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"
        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)
    config.architectures = ["GPT2LMHeadModel"]
    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class
    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)
    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)
    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 492
| 1
|
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'vinai/phobert-base': 256,
'vinai/phobert-large': 256,
}
def get_pairs(word):
    # Return the set of adjacent symbol pairs in a word (a tuple of symbols).
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
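# Hedged illustration (not in the original file): the symbol pairs BPE considers
# for the word "hello" before any merges have been applied.
def _demo_get_pairs():
    print(get_pairs(tuple("hello")))  # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}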
class PhobertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3
        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.encoder)
    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)
        return out_vocab_file, out_merge_file
    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
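# Hedged illustration (not part of the library file): PhoBERT marks non-final BPE
# pieces with a trailing "@@", so detokenization simply deletes "@@ " and strips.
def _demo_detokenize():
    print(" ".join(["un@@", "believ@@", "able"]).replace("@@ ", "").strip())  # unbelievable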
| 632
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine."
def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
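# Hedged usage note (not part of the original file): this module normally runs through
# the `accelerate` CLI entry point, e.g. `accelerate config` or
# `accelerate config --config_file <path>`, rather than being executed directly.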
| 632
| 1
|
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    # Normalize unit names (accept plurals and full names like "kilometers").
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
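    # Hedged usage sketch (not part of the original file): kilometers -> meters
    # multiplies by 10**(3 - 0).
    print(length_conversion(4, "kilometer", "meter"))  # 4000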
| 548
|
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False
    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors
    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False
    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )
            # Each frontier chases the other frontier's most recent node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None
    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest
    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 548
| 1
|
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]
        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
__lowerCAmelCase : Optional[Any] = {"input_ids": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 615
|
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
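# Hedged note (not part of the original test module): `parquet_path`, `dataset`, and
# `shared_datadir` are pytest fixtures supplied by the test suite's conftest files,
# which is why they appear as bare function parameters above.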
| 615
| 1
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]
    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]
    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()
    model = SpeechT5HifiGan(config)
    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
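# Hedged aside (not part of the conversion script): under weight norm a conv weight is
# stored as a direction `weight_v` and a magnitude `weight_g`; `remove_weight_norm()`
# folds both back into a single `weight` tensor after the copy above.
def _demo_weight_norm():
    conv = torch.nn.utils.weight_norm(torch.nn.Conv1d(2, 2, 3))
    print(sorted(name for name, _ in conv.named_parameters()))  # ['bias', 'weight_g', 'weight_v']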
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 82
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
    'processing_git': ['GitProcessor'],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_git'] = [
        'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GitForCausalLM',
        'GitModel',
        'GitPreTrainedModel',
        'GitVisionModel',
    ]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 614
| 0
|
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution() -> int:
    # Count the words in words.txt whose alphabetical value is a triangular number.
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, "words.txt")
    words = ""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
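# Hedged illustration (not part of the original solution): scoring a single word,
# e.g. "SKY" -> 19 + 11 + 25 = 55, which is the 10th triangular number.
def _is_triangle_word(word):
    return sum(ord(ch) - 64 for ch in word) in TRIANGULAR_NUMBERS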
if __name__ == "__main__":
print(solution())
| 718
|
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")
    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
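    # Hedged usage sketch (not part of the original file): expected output is
    # "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz " (with trailing space).
    print(fizz_buzz(1, 15))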
| 35
| 0
|
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")
    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))
    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 71
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 71
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
A__ : List[str] = logging.get_logger(__name__)
class lowercase__ ( ImageGPTImageProcessor ):
    def __init__( self : Dict , *args , **kwargs ):
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
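# Subclassing keeps the old ImageGPTFeatureExtractor name importable while it
# emits a FutureWarning; all real behaviour lives in ImageGPTImageProcessor.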
| 702
|
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
A__ : str = logging.get_logger(__name__)
class lowercase__ ( ProcessorMixin ):
_UpperCAmelCase :Optional[Any] = "AutoTokenizer"
_UpperCAmelCase :Any = ["tokenizer"]
_UpperCAmelCase :Optional[Any] = {
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
def __init__( self : List[str] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any]=None ):
super().__init__(snake_case__ )
lowerCamelCase_ : List[Any] =speaker_embeddings
@classmethod
def UpperCAmelCase__ ( cls : Optional[int] , snake_case__ : Any , snake_case__ : str="speaker_embeddings_path.json" , **snake_case__ : Union[str, Any] ):
if speaker_embeddings_dict_path is not None:
lowerCamelCase_ : str =get_file_from_repo(
snake_case__ , snake_case__ , subfolder=kwargs.pop("subfolder" , snake_case__ ) , cache_dir=kwargs.pop("cache_dir" , snake_case__ ) , force_download=kwargs.pop("force_download" , snake_case__ ) , proxies=kwargs.pop("proxies" , snake_case__ ) , resume_download=kwargs.pop("resume_download" , snake_case__ ) , local_files_only=kwargs.pop("local_files_only" , snake_case__ ) , use_auth_token=kwargs.pop("use_auth_token" , snake_case__ ) , revision=kwargs.pop("revision" , snake_case__ ) , )
if speaker_embeddings_path is None:
                logger.warning(
                    F"""`{os.path.join(snake_case__ , snake_case__ )}` does not exist; no preloaded speaker embeddings will be used. Make sure to provide a correct path to the JSON dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
lowerCamelCase_ : Any =None
else:
with open(snake_case__ ) as speaker_embeddings_json:
lowerCamelCase_ : List[str] =json.load(snake_case__ )
else:
lowerCamelCase_ : List[Any] =None
lowerCamelCase_ : Optional[Any] =AutoTokenizer.from_pretrained(snake_case__ , **snake_case__ )
return cls(tokenizer=snake_case__ , speaker_embeddings=snake_case__ )
def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Optional[Any]="speaker_embeddings_path.json" , snake_case__ : Tuple="speaker_embeddings" , snake_case__ : bool = False , **snake_case__ : str , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(snake_case__ , snake_case__ , "v2" ) , exist_ok=snake_case__ )
lowerCamelCase_ : Tuple ={}
lowerCamelCase_ : Dict =save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
lowerCamelCase_ : List[str] =self._load_voice_preset(snake_case__ )
lowerCamelCase_ : Union[str, Any] ={}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] , snake_case__ , F"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=snake_case__ , )
lowerCamelCase_ : List[str] =os.path.join(snake_case__ , F"""{prompt_key}_{key}.npy""" )
lowerCamelCase_ : Tuple =tmp_dict
with open(os.path.join(snake_case__ , snake_case__ ) , "w" ) as fp:
json.dump(snake_case__ , snake_case__ )
super().save_pretrained(snake_case__ , snake_case__ , **snake_case__ )
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : str = None , **snake_case__ : Dict ):
lowerCamelCase_ : int =self.speaker_embeddings[voice_preset]
lowerCamelCase_ : Any ={}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
lowerCamelCase_ : Dict =get_file_from_repo(
self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , snake_case__ ) , cache_dir=kwargs.pop("cache_dir" , snake_case__ ) , force_download=kwargs.pop("force_download" , snake_case__ ) , proxies=kwargs.pop("proxies" , snake_case__ ) , resume_download=kwargs.pop("resume_download" , snake_case__ ) , local_files_only=kwargs.pop("local_files_only" , snake_case__ ) , use_auth_token=kwargs.pop("use_auth_token" , snake_case__ ) , revision=kwargs.pop("revision" , snake_case__ ) , )
if path is None:
            raise ValueError(
                F"""`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exist; no preloaded voice preset will be used. Make sure to provide correct paths to the {voice_preset} embeddings.""" )
lowerCamelCase_ : str =np.load(snake_case__ )
return voice_preset_dict
def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : Optional[dict] = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self : int , snake_case__ : List[str]=None , snake_case__ : int=None , snake_case__ : int="pt" , snake_case__ : Optional[Any]=256 , snake_case__ : int=False , snake_case__ : List[str]=True , snake_case__ : List[Any]=False , **snake_case__ : Any , ):
if voice_preset is not None and not isinstance(snake_case__ , snake_case__ ):
if (
isinstance(snake_case__ , snake_case__ )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
lowerCamelCase_ : Union[str, Any] =self._load_voice_preset(snake_case__ )
else:
if isinstance(snake_case__ , snake_case__ ) and not voice_preset.endswith(".npz" ):
lowerCamelCase_ : str =voice_preset + ".npz"
lowerCamelCase_ : Optional[int] =np.load(snake_case__ )
if voice_preset is not None:
self._validate_voice_preset_dict(snake_case__ , **snake_case__ )
lowerCamelCase_ : List[Any] =BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
lowerCamelCase_ : List[str] =self.tokenizer(
snake_case__ , return_tensors=snake_case__ , padding="max_length" , max_length=snake_case__ , return_attention_mask=snake_case__ , return_token_type_ids=snake_case__ , add_special_tokens=snake_case__ , **snake_case__ , )
if voice_preset is not None:
lowerCamelCase_ : Optional[Any] =voice_preset
return encoded_text
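# A hedged usage sketch of the processor above (the checkpoint id and preset
# name are illustrative assumptions, not taken from this file):
#
#     processor = lowercase__.from_pretrained("suno/bark-small")
#     inputs = processor("Hello world", voice_preset="v2/en_speaker_6")
#
# A string `voice_preset` is resolved via `_load_voice_preset`, validated for
# the semantic/coarse/fine arrays, and returned in the BatchFeature beside the
# tokenized text.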
| 244
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase__ ( ProcessorMixin ):
"""simple docstring"""
__a = ["""image_processor""", """tokenizer"""]
__a = """BridgeTowerImageProcessor"""
__a = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self : Optional[int] , UpperCamelCase : int , UpperCamelCase : str ):
'''simple docstring'''
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[bool, str, PaddingStrategy] = False , UpperCamelCase : Union[bool, str, TruncationStrategy] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : int = 0 , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[str, TensorType]] = None , **UpperCamelCase : Any , ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.tokenizer(
text=UpperCamelCase , add_special_tokens=UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=UpperCamelCase , stride=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_token_type_ids=UpperCamelCase , return_attention_mask=UpperCamelCase , return_overflowing_tokens=UpperCamelCase , return_special_tokens_mask=UpperCamelCase , return_offsets_mapping=UpperCamelCase , return_length=UpperCamelCase , verbose=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase , )
# add pixel_values + pixel_mask
__UpperCAmelCase : Dict = self.image_processor(
UpperCamelCase , return_tensors=UpperCamelCase , do_normalize=UpperCamelCase , do_center_crop=UpperCamelCase , **UpperCamelCase )
encoding.update(UpperCamelCase )
return encoding
def lowerCamelCase__ ( self : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : int ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def lowerCamelCase__ ( self : int , *UpperCamelCase : Optional[Any] , **UpperCamelCase : str ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = self.tokenizer.model_input_names
__UpperCAmelCase : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
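# A hedged usage sketch (checkpoint id and inputs are illustrative assumptions,
# not taken from this file): __call__ tokenizes the text, runs the image
# processor to produce pixel_values/pixel_mask, and merges both into a single
# encoding, e.g.
#
#     processor = lowerCamelCase__.from_pretrained("BridgeTower/bridgetower-base")
#     encoding = processor(image, "an image caption", return_tensors="pt")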
| 139
|
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowerCamelCase__ ( SchedulerMixin , ConfigMixin ):
    """Fourth-order Improved Pseudo Linear Multistep (F-PNDM) scheduler."""

    order = 1

    @register_to_config
    def __init__( self : Dict , num_train_timesteps : int = 1_000 , trained_betas : Optional[Union[np.ndarray, List[float]]] = None ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []

    def set_timesteps( self : List[Any] , num_inference_steps : int , device : Union[str, torch.device] = None ):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )
        self.ets = []

    def step( self : Dict , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , return_dict : bool = True , ):
        if self.num_inference_steps is None:
            raise ValueError(
                """Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )
        # Adams-Bashforth linear multistep: combine up to the last four stored
        # model outputs with the classical 1st/2nd/3rd/4th-order coefficients
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample , timestep_index , prev_timestep_index , ets )
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )

    def scale_model_input( self : Optional[int] , sample : torch.FloatTensor , *args : Union[str, Any] , **kwargs : List[str] ):
        return sample

    def _get_prev_sample( self : Dict , sample : torch.FloatTensor , timestep_index : int , prev_timestep_index : int , ets : torch.FloatTensor ):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha , 1e-8 )
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__( self : Tuple ):
        return self.config.num_train_timesteps
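# A minimal, hand-runnable sketch of the denoising loop this scheduler expects
# (the zero "model output" below is a stand-in rather than a real UNet, and
# `_demo_loop` is a hypothetical helper added for illustration):
def _demo_loop() -> None:
    scheduler = lowerCamelCase__()
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # stand-in for a model call
        sample = scheduler.step(model_output, t, sample).prev_sample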
| 139
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
class a ( BaseImageProcessor ):
SCREAMING_SNAKE_CASE : str = ["""pixel_values"""]
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Dict[str, int]] = None , __SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , **__SCREAMING_SNAKE_CASE : Dict , ) -> None:
super().__init__(**__lowerCAmelCase )
lowerCamelCase_ = size if size is not None else {'height': 224, 'width': 224}
lowerCamelCase_ = get_size_dict(__lowerCAmelCase )
lowerCamelCase_ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowerCamelCase_ = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase , param_name='crop_size' )
lowerCamelCase_ = do_resize
lowerCamelCase_ = do_rescale
lowerCamelCase_ = do_normalize
lowerCamelCase_ = do_center_crop
lowerCamelCase_ = crop_size
lowerCamelCase_ = size
lowerCamelCase_ = resample
lowerCamelCase_ = rescale_factor
lowerCamelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowerCamelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Dict[str, int] , __SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : List[str] , ) -> np.ndarray:
lowerCamelCase_ = get_size_dict(__lowerCAmelCase )
if "shortest_edge" in size:
lowerCamelCase_ = get_resize_output_image_size(__lowerCAmelCase , size=size['shortest_edge'] , default_to_square=__lowerCAmelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
lowerCamelCase_ = (size['height'], size['width'])
else:
raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def UpperCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Dict[str, int] , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> np.ndarray:
lowerCamelCase_ = get_size_dict(__lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(__lowerCAmelCase , size=(size['height'], size['width']) , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def UpperCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : int ) -> np.ndarray:
return rescale(__lowerCAmelCase , scale=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def UpperCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Union[float, List[float]] , __SCREAMING_SNAKE_CASE : Union[float, List[float]] , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Dict , ) -> np.ndarray:
return normalize(__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def UpperCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : ImageInput , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : PILImageResampling = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : int = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[float] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE : Tuple , ) -> BatchFeature:
lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase_ = crop_size if crop_size is not None else self.crop_size
lowerCamelCase_ = get_size_dict(__lowerCAmelCase , param_name='crop_size' , default_to_square=__lowerCAmelCase )
lowerCamelCase_ = resample if resample is not None else self.resample
lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ = image_std if image_std is not None else self.image_std
lowerCamelCase_ = size if size is not None else self.size
lowerCamelCase_ = get_size_dict(__lowerCAmelCase )
if not is_batched(__lowerCAmelCase ):
lowerCamelCase_ = [images]
if not valid_images(__lowerCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
lowerCamelCase_ = [to_numpy_array(__lowerCAmelCase ) for image in images]
if do_resize:
lowerCamelCase_ = [self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase ) for image in images]
if do_center_crop:
lowerCamelCase_ = [self.center_crop(image=__lowerCAmelCase , size=__lowerCAmelCase ) for image in images]
if do_rescale:
lowerCamelCase_ = [self.rescale(image=__lowerCAmelCase , scale=__lowerCAmelCase ) for image in images]
if do_normalize:
lowerCamelCase_ = [self.normalize(image=__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase ) for image in images]
lowerCamelCase_ = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase ) for image in images]
lowerCamelCase_ = {'pixel_values': images}
return BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )
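# A hedged note on the pipeline above: the final method converts incoming
# images to numpy arrays, then applies resize -> center_crop -> rescale ->
# normalize (each step gated by its do_* flag), fixes the channel dimension,
# and returns everything as a BatchFeature under "pixel_values".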
| 710
|
"""simple docstring"""
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    """Return the modular inverse of `a` modulo `m` via the extended
    Euclidean algorithm; raise ValueError when gcd(a, m) != 1."""
    if gcd(a, m) != 1:
        msg = F'''mod inverse of {a!r} and {m!r} does not exist'''
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
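# A minimal, hand-checkable example (values assumed, not from the original
# file): 7 * 15 = 105 = 4 * 26 + 1, so the inverse of 7 modulo 26 is 15.
if __name__ == "__main__":
    assert gcd(7, 26) == 1
    assert mod_inverse(7, 26) == 15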
| 137
| 0
|
"""simple docstring"""
def solution(limit=100_0000):
    """Sum Euler's totient phi(n) for 2 <= n <= limit; this equals the number
    of reduced proper fractions with denominator <= limit."""
    # sieve the odd primes, then add 2
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    # multiplicative sieve: phi(n) = n * prod(1 - 1/p) over the primes p | n
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F"""{solution() = }""")
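    # Hand-checkable example (value assumed, not from the original file): the
    # totients of 2..10 are 1, 2, 2, 4, 2, 6, 4, 6, 4, which sum to 31.
    print(F"""{solution(10) = }""")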
| 341
|
"""simple docstring"""
import operator as op
def solve(post_fix):
    """Evaluate a space-separated postfix expression, printing a trace table."""
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        """^""": op.pow,
        """*""": op.mul,
        """/""": div,
        """+""": op.add,
        """-""": op.sub,
    }  # operators & their respective operation
    # print table header
    print("""Symbol""".center(8 ) , """Action""".center(12 ) , """Stack""" , sep=""" | """ )
    print("""-""" * (30 + len(post_fix )) )
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x )  # append x to stack
            # output in tabular format
            print(x.rjust(8 ) , ("""push(""" + x + """)""").ljust(12 ) , """,""".join(stack ) , sep=""" | """ )
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("""""".rjust(8 ) , ("""pop(""" + b + """)""").ljust(12 ) , """,""".join(stack ) , sep=""" | """ )
            a = stack.pop()  # pop stack
            # output in tabular format
            print("""""".rjust(8 ) , ("""pop(""" + a + """)""").ljust(12 ) , """,""".join(stack ) , sep=""" | """ )
            stack.append(
                str(opr[x](int(a ) , int(b ) ) ) )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) , ("""push(""" + a + x + b + """)""").ljust(12 ) , """,""".join(stack ) , sep=""" | """ , )
    return int(stack[0] )


if __name__ == "__main__":
    Postfix = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
    print("""\n\tResult = """, solve(Postfix))
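# A hand-checkable, non-interactive example (expression assumed, not from the
# original file): 5 + 6 * 9 written in postfix is "5 6 9 * +".
def _demo() -> None:  # hypothetical helper added for illustration
    assert solve("5 6 9 * +".split(" ")) == 59  # also prints the trace table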
| 341
| 1
|
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowy and bright night, with many brightly lit buildings"
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
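# Note: the (2, 4, 64, 64) latent and (2, 77, 768) text-embedding shapes fed
# to ipex.optimize as sample_input assume a 512x512 SD v1-style UNet; adjust
# them if your checkpoint uses a different latent resolution.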
| 284
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def snake_case_ ( self : Optional[Any] ):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('''gelu''' )
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )

    def snake_case_ ( self : Dict ):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('''gelu''' )
        geluaa = get_activation('''gelu_10''' )
        y_gelu = torch_builtin(x )
        y_gelu_aa = geluaa(x )
        # gelu_10 clips activations at 10.0, so it must agree with plain gelu
        # wherever the clipped output stays below the cap
        clipped_mask = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
        self.assertTrue(torch.max(y_gelu_aa ).item() == 10.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def snake_case_ ( self : Any ):
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
        with self.assertRaises(KeyError ):
            get_activation('''bogus''' )
        with self.assertRaises(KeyError ):
            get_activation(None )
    def snake_case_ ( self : Dict ):
        acta = get_activation('''gelu''' )
        acta.a = 1
        actb = get_activation('''gelu''' )
        self.assertEqual(acta.a , 1 )
        # each call returns a distinct activation object, so an attribute set
        # on one instance must not appear on a freshly fetched one
        with self.assertRaises(AttributeError ):
            _ = actb.a
| 284
| 1
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class __A ( TokenizerTesterMixin , unittest.TestCase ):
UpperCamelCase = XGLMTokenizer
UpperCamelCase = XGLMTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def A__ ( self :Tuple ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__magic_name__ : Optional[Any] =XGLMTokenizer(__snake_case , keep_accents=__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Tuple ="""<pad>"""
__magic_name__ : Tuple =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : List[Any] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(__snake_case ) , 10_08 )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_08 )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Dict =XGLMTokenizer(__snake_case , keep_accents=__snake_case )
__magic_name__ : Any =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__snake_case , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__magic_name__ : List[Any] =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__magic_name__ : Optional[Any] =tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__magic_name__ : Tuple =tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def A__ ( self :str ):
'''simple docstring'''
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def A__ ( self :int ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__snake_case , f.name )
__magic_name__ : Tuple =XGLMTokenizer(f.name , keep_accents=__snake_case )
__magic_name__ : Optional[Any] =pickle.dumps(__snake_case )
pickle.loads(__snake_case )
def A__ ( self :Dict ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__magic_name__ : List[str] =self.get_tokenizer()
__magic_name__ : Dict =self.get_rust_tokenizer()
__magic_name__ : Tuple ="""I was born in 92000, and this is falsé."""
__magic_name__ : List[str] =tokenizer.tokenize(__snake_case )
__magic_name__ : Tuple =rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__magic_name__ : List[str] =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
__magic_name__ : Optional[Any] =rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__magic_name__ : Dict =self.get_rust_tokenizer()
__magic_name__ : Dict =tokenizer.encode(__snake_case )
__magic_name__ : List[str] =rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@slow
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Any ="""Hello World!"""
__magic_name__ : List[Any] =[2, 3_12_27, 44_47, 35]
self.assertListEqual(__snake_case , self.big_tokenizer.encode(__snake_case ) )
@slow
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =(
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
__magic_name__ : Tuple =[2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
# fmt: on
self.assertListEqual(__snake_case , self.big_tokenizer.encode(__snake_case ) )
@slow
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : int ={
"""input_ids""": [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name="""facebook/xglm-564M""" , padding=__snake_case , )
| 21
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def _A ( snake_case__ : Accelerator , snake_case__ : int = 16 ):
snake_case__ : Optional[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
snake_case__ : str = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case__ : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
snake_case__ : List[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case__ : Tuple = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case__ : Optional[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case__ : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case__ : Tuple = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case__ : Tuple = 16
elif accelerator.mixed_precision != "no":
snake_case__ : int = 8
else:
snake_case__ : List[Any] = None
return tokenizer.pad(
snake_case__ , padding='''longest''' , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors='''pt''' , )
# Instantiate dataloaders.
snake_case__ : int = DataLoader(
tokenized_datasets['''train'''] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
snake_case__ : Optional[int] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCAmelCase : Tuple = mocked_dataloaders # noqa: F811
def _A ( snake_case__ : Optional[Any] , snake_case__ : int ):
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , snake_case__ ) == "1":
snake_case__ : Any = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
snake_case__ : List[str] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
snake_case__ : List[str] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case__ : List[str] = config['''lr''']
snake_case__ : List[Any] = int(config['''num_epochs'''] )
snake_case__ : List[Any] = int(config['''seed'''] )
snake_case__ : Union[str, Any] = int(config['''batch_size'''] )
set_seed(snake_case__ )
snake_case__ ,snake_case__ : Tuple = get_dataloaders(snake_case__ , snake_case__ )
snake_case__ : Union[str, Any] = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
snake_case__ : List[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
snake_case__ : Dict = batch_size // MAX_GPU_BATCH_SIZE
snake_case__ : int = MAX_GPU_BATCH_SIZE
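        # e.g. with batch_size=64 and MAX_GPU_BATCH_SIZE=16 this runs
        # micro-batches of 16 and steps the optimizer every 4 micro-batches,
        # keeping the effective batch size unchanged.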
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case__ : Tuple = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=snake_case__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case__ : Dict = model.to(accelerator.device )
# Instantiate optimizer
snake_case__ : List[Any] = AdamW(params=model.parameters() , lr=snake_case__ )
# Instantiate scheduler
snake_case__ : Dict = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=1_00 , num_training_steps=(len(snake_case__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ : List[str] = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
snake_case__ : Union[str, Any] = os.path.split(snake_case__ )[-1].split('''.''' )[0]
accelerator.init_trackers(snake_case__ , snake_case__ )
# Now we train the model
for epoch in range(snake_case__ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
snake_case__ : Tuple = 0
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
snake_case__ : Optional[Any] = model(**snake_case__ )
snake_case__ : List[str] = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
snake_case__ : str = loss / gradient_accumulation_steps
accelerator.backward(snake_case__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
snake_case__ : int = model(**snake_case__ )
snake_case__ : str = outputs.logits.argmax(dim=-1 )
snake_case__ ,snake_case__ : Optional[Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
snake_case__ : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , snake_case__ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(snake_case__ ),
'''epoch''': epoch,
} , step=snake_case__ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def _A ( ):
snake_case__ : Optional[Any] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=snake_case__ , default=snake_case__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
        '''--project_dir''' , type=snake_case__ , default='''logs''' , help='''Location where to store experiment tracking logs and relevant project information''' , )
snake_case__ : List[Any] = parser.parse_args()
snake_case__ : Any = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
| 261
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCamelCase : Dict = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 448
|
def heaps(arr: list) -> list:
    """Return all permutations of `arr`, generated in place with Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
    print(heaps(arr))
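    # Hand-checkable example (values assumed, not from the original file):
    # heaps([1, 2, 3]) returns each of the 3! = 6 permutations exactly once.
    assert len(heaps([1, 2, 3])) == 6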
| 448
| 1
|
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowercase_ ( _lowercase , _lowercase="shi-labs/oneformer_demo" ) -> List[str]:
'''simple docstring'''
with open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) as f:
lowerCamelCase_ : Optional[Any] = json.load(_lowercase )
lowerCamelCase_ : Any = {}
lowerCamelCase_ : int = []
lowerCamelCase_ : Union[str, Any] = []
for key, info in class_info.items():
lowerCamelCase_ : List[Any] = info['''name''']
class_names.append(info['''name'''] )
if info["isthing"]:
thing_ids.append(int(_lowercase ) )
lowerCamelCase_ : List[Any] = thing_ids
lowerCamelCase_ : Any = class_names
return metadata
class __lowercase ( unittest.TestCase ):
def __init__(self , A , A=7 , A=3 , A=3_0 , A=4_0_0 , A=None , A=True , A=True , A=[0.5, 0.5, 0.5] , A=[0.5, 0.5, 0.5] , A=1_0 , A=False , A=2_5_5 , A="shi-labs/oneformer_demo" , A="ade20k_panoptic.json" , A=1_0 , ):
lowerCamelCase_ : Union[str, Any] = parent
lowerCamelCase_ : Any = batch_size
lowerCamelCase_ : List[Any] = num_channels
lowerCamelCase_ : Tuple = min_resolution
lowerCamelCase_ : Tuple = max_resolution
lowerCamelCase_ : Tuple = do_resize
lowerCamelCase_ : Tuple = {'''shortest_edge''': 3_2, '''longest_edge''': 1_3_3_3} if size is None else size
lowerCamelCase_ : Union[str, Any] = do_normalize
lowerCamelCase_ : Optional[Any] = image_mean
lowerCamelCase_ : str = image_std
lowerCamelCase_ : int = class_info_file
lowerCamelCase_ : Any = prepare_metadata(A , A )
lowerCamelCase_ : List[Any] = num_text
lowerCamelCase_ : Tuple = repo_path
# for the post_process_functions
lowerCamelCase_ : Dict = 2
lowerCamelCase_ : Union[str, Any] = 1_0
lowerCamelCase_ : Optional[Any] = 1_0
lowerCamelCase_ : Optional[Any] = 3
lowerCamelCase_ : List[str] = 4
lowerCamelCase_ : List[Any] = num_labels
lowerCamelCase_ : Dict = do_reduce_labels
lowerCamelCase_ : Tuple = ignore_index
def UpperCAmelCase__ (self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def UpperCAmelCase__ (self , A , A=False ):
if not batched:
lowerCamelCase_ : Any = image_inputs[0]
if isinstance(A , Image.Image ):
lowerCamelCase_, lowerCamelCase_ : Tuple = image.size
else:
lowerCamelCase_, lowerCamelCase_ : List[str] = image.shape[1], image.shape[2]
if w < h:
lowerCamelCase_ : Optional[Any] = int(self.size['''shortest_edge'''] * h / w )
lowerCamelCase_ : Tuple = self.size['''shortest_edge''']
elif w > h:
lowerCamelCase_ : Optional[int] = self.size['''shortest_edge''']
lowerCamelCase_ : Union[str, Any] = int(self.size['''shortest_edge'''] * w / h )
else:
lowerCamelCase_ : Union[str, Any] = self.size['''shortest_edge''']
lowerCamelCase_ : Optional[int] = self.size['''shortest_edge''']
else:
lowerCamelCase_ : str = []
for image in image_inputs:
lowerCamelCase_, lowerCamelCase_ : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase_ : Any = max(A , key=lambda A : item[0] )[0]
lowerCamelCase_ : Tuple = max(A , key=lambda A : item[1] )[1]
return expected_height, expected_width
def UpperCAmelCase__ (self ):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class __lowercase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
lowerCamelCase : Optional[Any] = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
lowerCamelCase : int = image_processing_class
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = OneFormerImageProcessorTester(self )
@property
def UpperCAmelCase__ (self ):
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , '''image_mean''' ) )
self.assertTrue(hasattr(A , '''image_std''' ) )
self.assertTrue(hasattr(A , '''do_normalize''' ) )
self.assertTrue(hasattr(A , '''do_resize''' ) )
self.assertTrue(hasattr(A , '''size''' ) )
self.assertTrue(hasattr(A , '''ignore_index''' ) )
self.assertTrue(hasattr(A , '''class_info_file''' ) )
self.assertTrue(hasattr(A , '''num_text''' ) )
self.assertTrue(hasattr(A , '''repo_path''' ) )
self.assertTrue(hasattr(A , '''metadata''' ) )
self.assertTrue(hasattr(A , '''do_reduce_labels''' ) )
def UpperCAmelCase__ (self ):
pass
def UpperCAmelCase__ (self ):
# Initialize image_processor
lowerCamelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
lowerCamelCase_ : int = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : Union[str, Any] = self.image_processing_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_, lowerCamelCase_ : Dict = self.image_processing_tester.get_expected_values(A , batched=A )
lowerCamelCase_ : Union[str, Any] = image_processor(
A , ['''semantic'''] * len(A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ (self ):
# Initialize image_processor
lowerCamelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
lowerCamelCase_ : Union[str, Any] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : List[Any] = self.image_processing_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_, lowerCamelCase_ : Dict = self.image_processing_tester.get_expected_values(A , batched=A )
lowerCamelCase_ : Any = image_processor(
A , ['''semantic'''] * len(A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ (self ):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processor(
            image_inputs , ['''semantic'''] * len(image_inputs ) , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def comm_get_image_processor_inputs(self , with_segmentation_maps=False , is_instance_map=False , segmentation_type="np" ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False )
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels ) ) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded ) )
            annotations = [
                np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation ) for annotation in annotations]
        inputs = image_processor(
            image_inputs , ['''semantic'''] * len(image_inputs ) , annotations , return_tensors='''pt''' , instance_id_to_semantic_id=instance_id_to_semantic_id , pad_and_return_pixel_mask=True , )
        return inputs
def UpperCAmelCase__ (self ):
pass
def UpperCAmelCase__ (self ):
        def common(is_instance_map=False , segmentation_type=None ):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True , is_instance_map=is_instance_map , segmentation_type=segmentation_type )
            mask_labels = inputs['''mask_labels''']
            class_labels = inputs['''class_labels''']
            pixel_values = inputs['''pixel_values''']
            text_inputs = inputs['''text_inputs''']
            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels , class_labels , text_inputs ):
                self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
                self.assertEqual(len(text_input ) , self.image_processing_tester.num_text )
        common()
        common(is_instance_map=True )
        common(is_instance_map=False , segmentation_type='''pil''' )
        common(is_instance_map=True , segmentation_type='''pil''' )
def UpperCAmelCase__ (self ):
        fake_binary_mask = np.zeros((2_0, 5_0) )
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1
        rle = binary_mask_to_rle(fake_binary_mask )
        self.assertEqual(len(rle ) , 4 )
        self.assertEqual(rle[0] , 2_1 )
        self.assertEqual(rle[1] , 4_5 )
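        # Hedged reading of the assertions above: binary_mask_to_rle appears to emit
        # (1-indexed start, run length) pairs for each run of 1s in the row-major
        # flattened mask. With the mask built here, the first 1-run starts at pixel 21
        # and spans 45 pixels (row 0 cols 20-49 plus row 1 cols 0-14), giving
        # rle == [21, 45, 251, 10].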
def UpperCAmelCase__ (self ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs )
        self.assertEqual(len(segmentation ) , self.image_processing_tester.batch_size )
        self.assertEqual(
            segmentation[0].shape , (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ) , )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        segmentation = image_processor.post_process_semantic_segmentation(outputs , target_sizes=target_sizes )
        self.assertEqual(segmentation[0].shape , target_sizes[0] )
def UpperCAmelCase__ (self ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue('''segmentation''' in el )
            self.assertTrue('''segments_info''' in el )
            self.assertEqual(type(el['''segments_info'''] ) , list )
            self.assertEqual(
                el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCAmelCase__ (self ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue('''segmentation''' in el )
            self.assertTrue('''segments_info''' in el )
            self.assertEqual(type(el['''segments_info'''] ) , list )
            self.assertEqual(
                el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : Optional[int] = logging.get_logger(__name__)
__lowercase : Optional[int] = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class __lowercase ( _lowercase ):
lowerCamelCase : List[str] = "luke"
    def __init__(self , vocab_size=5_0_2_6_7 , entity_vocab_size=5_0_0_0_0_0 , hidden_size=7_6_8 , entity_emb_size=2_5_6 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_entity_aware_attention=True , classifier_dropout=None , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
"""simple docstring"""
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester( unittest.TestCase ):
    def __init__( self , parent ):
        self.parent = parent
    def prepare_feat_extract_dict( self ):
        return {}
def get_html_strings() -> list:
    html_string_1 = '<HTML>\n\n    <HEAD>\n    <TITLE>sample document</TITLE>\n    </HEAD>\n\n    <BODY BGCOLOR="FFFFFF">\n    <HR>\n    <a href="http://google.com">Goog</a>\n    <H1>This is one header</H1>\n    <H2>This is a another Header</H2>\n    <P>Travel from\n    <P>\n    <B>SFO to JFK</B>\n    <BR>\n    <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n    <HR>\n    <div style="color:#0000FF">\n    <h3>Traveler <b> name </b> is\n    <p> John Doe </p>\n    </div>'
    html_string_2 = '\n    <!DOCTYPE html>\n    <html>\n    <body>\n\n    <h1>My First Heading</h1>\n    <p>My first paragraph.</p>\n\n    </body>\n    </html>\n    '
    return [html_string_1, html_string_2]
@require_bsa
class __a ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ : Tuple = MarkupLMFeatureExtractor if is_bsa_available() else None
    def setUp( self ):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self )
@property
def snake_case_ ( self ):
return self.feature_extract_tester.prepare_feat_extract_dict()
def snake_case_ ( self ):
# Initialize feature_extractor
_lowerCamelCase = self.feature_extraction_class()
# Test not batched input
_lowerCamelCase = get_html_strings()[0]
_lowerCamelCase = feature_extractor(a__ )
# fmt: off
_lowerCamelCase = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
_lowerCamelCase = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
# fmt: on
self.assertEqual(encoding.nodes , a__ )
self.assertEqual(encoding.xpaths , a__ )
# Test batched
_lowerCamelCase = get_html_strings()
_lowerCamelCase = feature_extractor(a__ )
# fmt: off
_lowerCamelCase = expected_nodes + [['My First Heading', 'My first paragraph.']]
_lowerCamelCase = expected_xpaths + [['/html/body/h1', '/html/body/p']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , a__ )
self.assertEqual(encoding.xpaths , a__ )
"""simple docstring"""
from manim import *
class __a ( lowerCAmelCase__ ):
def snake_case_ ( self ):
_lowerCamelCase = Rectangle(height=0.5 , width=0.5 )
_lowerCamelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_lowerCamelCase = [mem.copy() for i in range(6 )]
_lowerCamelCase = [mem.copy() for i in range(6 )]
_lowerCamelCase = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCamelCase = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCamelCase = VGroup(a__ , a__ ).arrange(a__ , buff=0 )
_lowerCamelCase = Text('CPU' , font_size=24 )
_lowerCamelCase = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a__ )
_lowerCamelCase = [mem.copy() for i in range(4 )]
_lowerCamelCase = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCamelCase = Text('GPU' , font_size=24 )
_lowerCamelCase = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
gpu.move_to([-1, -1, 0] )
self.add(a__ )
_lowerCamelCase = [mem.copy() for i in range(6 )]
_lowerCamelCase = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCamelCase = Text('Model' , font_size=24 )
_lowerCamelCase = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
model.move_to([3, -1.0, 0] )
self.add(a__ )
_lowerCamelCase = []
for i, rect in enumerate(a__ ):
rect.set_stroke(a__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_lowerCamelCase = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(a__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=a__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=a__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=a__ , buff=0.0 )
self.add(a__ )
cpu_targs.append(a__ )
_lowerCamelCase = [mem.copy() for i in range(6 )]
_lowerCamelCase = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCamelCase = Text('Loaded Checkpoint' , font_size=24 )
_lowerCamelCase = Group(a__ , a__ ).arrange(a__ , aligned_edge=a__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_lowerCamelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCamelCase = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a__ , a__ )
_lowerCamelCase = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(a__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_lowerCamelCase = MarkupText(
F'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(a__ ) , Write(a__ ) )
self.play(Write(a__ , run_time=1 ) , Create(a__ , run_time=1 ) )
_lowerCamelCase = []
_lowerCamelCase = []
for i, rect in enumerate(a__ ):
_lowerCamelCase = fill.copy().set_fill(a__ , opacity=0.7 )
target.move_to(a__ )
first_animations.append(GrowFromCenter(a__ , run_time=1 ) )
_lowerCamelCase = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(a__ , run_time=1.5 ) )
self.play(*a__ )
self.play(*a__ )
self.wait()
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : List[str] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
lowerCamelCase : Tuple = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
lowerCamelCase : str = {"facebook/blenderbot_small-90M": 512}
def get_pairs( word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
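# Illustrative example (added here, not in the original file): for the symbol
# tuple ('h', 'e', 'l', 'l', 'o</w>'), get_pairs returns
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')} -- the adjacent symbol
# bigrams that the BPE loop below ranks against self.bpe_ranks to pick merges.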
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , merges_file , bos_token="__start__" , eos_token="__end__" , unk_token="__unk__" , pad_token="__null__" , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs )
        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding='utf-8' ) as merges_handle:
            merges = merges_handle.read().split('\n' )[1:-1]
        merges = [tuple(merge.split() ) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        self.cache = {}
@property
def a__ ( self : str ) -> int:
"""simple docstring"""
return len(self.encoder )
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def a__ ( self : str , A_ : str ) -> str:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowerCamelCase_ = re.sub('([.,!?()])' , r' \1' , A_ )
lowerCamelCase_ = re.sub('(\')' , r' \1 ' , A_ )
lowerCamelCase_ = re.sub(r'\s{2,}' , ' ' , A_ )
if "\n" in token:
lowerCamelCase_ = token.replace('\n' , ' __newln__' )
lowerCamelCase_ = token.split(' ' )
lowerCamelCase_ = []
for token in tokens:
if not len(A_ ):
continue
lowerCamelCase_ = token.lower()
lowerCamelCase_ = tuple(A_ )
lowerCamelCase_ = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
lowerCamelCase_ = get_pairs(A_ )
if not pairs:
words.append(A_ )
continue
while True:
lowerCamelCase_ = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_ , lowerCamelCase_ = bigram
lowerCamelCase_ = []
lowerCamelCase_ = 0
while i < len(A_ ):
try:
lowerCamelCase_ = word.index(A_ , A_ )
new_word.extend(word[i:j] )
lowerCamelCase_ = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ = tuple(A_ )
lowerCamelCase_ = new_word
if len(A_ ) == 1:
break
else:
lowerCamelCase_ = get_pairs(A_ )
lowerCamelCase_ = '@@ '.join(A_ )
lowerCamelCase_ = word[:-4]
lowerCamelCase_ = word
words.append(A_ )
return " ".join(A_ )
def a__ ( self : Tuple , A_ : str ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = []
lowerCamelCase_ = re.findall(r'\S+\n?' , A_ )
for token in words:
split_tokens.extend(list(self.bpe(A_ ).split(' ' ) ) )
return split_tokens
def a__ ( self : Tuple , A_ : str ) -> int:
"""simple docstring"""
lowerCamelCase_ = token.lower()
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def a__ ( self : Tuple , A_ : int ) -> str:
"""simple docstring"""
return self.decoder.get(A_ , self.unk_token )
def a__ ( self : Optional[Any] , A_ : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = ' '.join(A_ ).replace('@@ ' , '' ).strip()
return out_string
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        index = 0
        with open(merge_file , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return vocab_file, merge_file
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class a ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
A__ : int = "xglm"
A__ : List[Any] = ["past_key_values"]
A__ : str = {
"num_attention_heads": "attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "num_layers",
}
    def __init__( self , vocab_size=256008 , max_position_embeddings=2048 , d_model=1024 , ffn_dim=4096 , num_layers=24 , attention_heads=16 , activation_function="gelu" , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , layerdrop=0.0 , init_std=0.02 , scale_embedding=True , use_cache=True , decoder_start_token_id=2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption( parser ):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter ):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : str = 'segformer'
    def __init__(self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1E-6 , decoder_hidden_size=256 , semantic_loss_ignore_index=255 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                '''Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'''
                ''' removed, as the behaviour will default to that of reshape_last_stage = True.''' , FutureWarning , )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('''reshape_last_stage''' , True )
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : Dict = version.parse('1.11' )
@property
def a (self : List[str] ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def a (self : Tuple ):
"""simple docstring"""
return 1E-4
@property
def a (self : int ):
"""simple docstring"""
return 12
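    # For reference, in the upstream SegformerOnnxConfig these three properties are,
    # in order, `inputs`, `atol_for_validation`, and `default_onnx_opset`.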
def logical_left_shift( number: int , shift_amount: int ) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("""both inputs must be positive integers""" )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift( number: int , shift_amount: int ) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("""both inputs must be positive integers""" )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift( number: int , shift_amount: int ) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = '0' + str(bin(number ) ).strip("""-""" )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            '1' + '0' * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
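# Hedged usage sketch for the repaired helpers above:
#   logical_left_shift(0b1101, 1)     -> '0b11010'
#   logical_right_shift(0b1101, 1)    -> '0b110'
#   arithmetic_right_shift(-13, 1)    -> '0b11001'  (the sign bit is replicated)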
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def UpperCAmelCase_ ( __a : Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : int = DPTConfig()
if "large" in checkpoint_url:
_lowerCamelCase : Optional[Any] = 10_24
_lowerCamelCase : List[str] = 40_96
_lowerCamelCase : Union[str, Any] = 24
_lowerCamelCase : Any = 16
_lowerCamelCase : Union[str, Any] = [5, 11, 17, 23]
_lowerCamelCase : Optional[int] = [2_56, 5_12, 10_24, 10_24]
_lowerCamelCase : List[str] = (1, 3_84, 3_84)
if "ade" in checkpoint_url:
_lowerCamelCase : Any = True
_lowerCamelCase : List[str] = 1_50
_lowerCamelCase : int = 'huggingface/label-files'
_lowerCamelCase : Union[str, Any] = 'ade20k-id2label.json'
_lowerCamelCase : Union[str, Any] = json.load(open(cached_download(hf_hub_url(__a , __a , repo_type='dataset' ) ) , 'r' ) )
_lowerCamelCase : Optional[Any] = {int(__a ): v for k, v in idalabel.items()}
_lowerCamelCase : int = idalabel
_lowerCamelCase : Any = {v: k for k, v in idalabel.items()}
_lowerCamelCase : List[str] = [1, 1_50, 4_80, 4_80]
return config, expected_shape
def UpperCAmelCase_ ( __a : Tuple ):
'''simple docstring'''
_lowerCamelCase : int = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(__a , __a )
def UpperCAmelCase_ ( __a : Dict ):
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_lowerCamelCase : Optional[Any] = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
_lowerCamelCase : Optional[int] = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
_lowerCamelCase : Any = name.replace('patch_embed' , 'patch_embeddings' )
if "pos_embed" in name:
_lowerCamelCase : str = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
_lowerCamelCase : int = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
_lowerCamelCase : Tuple = name.replace('proj' , 'projection' )
if "blocks" in name:
_lowerCamelCase : Optional[int] = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
_lowerCamelCase : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_lowerCamelCase : Optional[int] = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name:
_lowerCamelCase : Optional[Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
_lowerCamelCase : str = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
_lowerCamelCase : Optional[int] = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
_lowerCamelCase : Dict = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
_lowerCamelCase : Tuple = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
_lowerCamelCase : Tuple = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
_lowerCamelCase : Tuple = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
_lowerCamelCase : List[Any] = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
_lowerCamelCase : str = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_lowerCamelCase : Union[str, Any] = name.replace(f"refinenet{layer_idx}" , f"fusion_stage.layers.{abs(layer_idx-4 )}" )
if "out_conv" in name:
_lowerCamelCase : Optional[Any] = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
_lowerCamelCase : str = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
_lowerCamelCase : List[str] = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
_lowerCamelCase : List[Any] = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
_lowerCamelCase : Optional[Any] = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_lowerCamelCase : Union[str, Any] = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
_lowerCamelCase : Tuple = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
_lowerCamelCase : List[str] = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
_lowerCamelCase : Dict = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_lowerCamelCase : Optional[int] = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
_lowerCamelCase : List[str] = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
_lowerCamelCase : Union[str, Any] = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
_lowerCamelCase : str = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
_lowerCamelCase : List[Any] = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
_lowerCamelCase : List[str] = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
_lowerCamelCase : Tuple = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
_lowerCamelCase : Any = name.replace('pretrained' , 'dpt' )
if "bn" in name:
_lowerCamelCase : Tuple = name.replace('bn' , 'batch_norm' )
if "head" in name:
_lowerCamelCase : str = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
_lowerCamelCase : Union[str, Any] = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
_lowerCamelCase : Union[str, Any] = name.replace('auxlayer' , 'auxiliary_head.head' )
return name
def UpperCAmelCase_ ( __a : Tuple , __a : Any ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase : Any = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight" )
_lowerCamelCase : Dict = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Tuple = in_proj_weight[: config.hidden_size, :]
_lowerCamelCase : Optional[int] = in_proj_bias[: config.hidden_size]
_lowerCamelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase : Any = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase : str = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase_ ( ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowerCamelCase : Dict = Image.open(requests.get(__a , stream=__a ).raw )
return im
@torch.no_grad()
def UpperCAmelCase_ ( __a : Optional[int] , __a : int , __a : str , __a : List[str] ):
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase : List[str] = get_dpt_config(__a )
# load original state_dict from URL
_lowerCamelCase : List[Any] = torch.hub.load_state_dict_from_url(__a , map_location='cpu' )
# remove certain keys
remove_ignore_keys_(__a )
# rename keys
for key in state_dict.copy().keys():
_lowerCamelCase : str = state_dict.pop(__a )
_lowerCamelCase : str = val
# read in qkv matrices
read_in_q_k_v(__a , __a )
# load HuggingFace model
_lowerCamelCase : List[str] = DPTForSemanticSegmentation(__a ) if 'ade' in checkpoint_url else DPTForDepthEstimation(__a )
model.load_state_dict(__a )
model.eval()
# Check outputs on an image
_lowerCamelCase : str = 4_80 if 'ade' in checkpoint_url else 3_84
_lowerCamelCase : Optional[Any] = DPTImageProcessor(size=__a )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : str = image_processor(__a , return_tensors='pt' )
# forward pass
_lowerCamelCase : Tuple = model(**__a ).logits if 'ade' in checkpoint_url else model(**__a ).predicted_depth
# Assert logits
_lowerCamelCase : Dict = torch.tensor([[6.3_1_9_9, 6.3_6_2_9, 6.4_1_4_8], [6.3_8_5_0, 6.3_6_1_5, 6.4_1_6_6], [6.3_5_1_9, 6.3_1_7_6, 6.3_5_7_5]] )
if "ade" in checkpoint_url:
_lowerCamelCase : List[Any] = torch.tensor([[4.0_4_8_0, 4.2_4_2_0, 4.4_3_6_0], [4.3_1_2_4, 4.5_6_9_3, 4.8_2_6_1], [4.5_7_6_8, 4.8_9_6_5, 5.2_1_6_3]] )
assert outputs.shape == torch.Size(__a )
assert (
torch.allclose(outputs[0, 0, :3, :3] , __a , atol=1E-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , __a )
)
Path(__a ).mkdir(exist_ok=__a )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(__a )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__a )
if push_to_hub:
print('Pushing model to hub...' )
model.push_to_hub(
repo_path_or_name=Path(__a , __a ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__a , )
image_processor.push_to_hub(
repo_path_or_name=Path(__a , __a ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__a , )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
a_ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __magic_name__ ( unittest.TestCase):
'''simple docstring'''
@property
def _A ( self: Optional[Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def _A ( self: int ):
SCREAMING_SNAKE_CASE_ = self.dummy_uncond_unet
SCREAMING_SNAKE_CASE_ = ScoreSdeVeScheduler()
SCREAMING_SNAKE_CASE_ = ScoreSdeVePipeline(unet=_lowerCamelCase , scheduler=_lowerCamelCase )
sde_ve.to(_lowerCamelCase )
sde_ve.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=_lowerCamelCase ).images
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=_lowerCamelCase , return_dict=_lowerCamelCase )[
0
]
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class __magic_name__ ( unittest.TestCase):
'''simple docstring'''
def _A ( self: int ):
SCREAMING_SNAKE_CASE_ = '''google/ncsnpp-church-256'''
SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = ScoreSdeVeScheduler.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = ScoreSdeVePipeline(unet=_lowerCamelCase , scheduler=_lowerCamelCase )
sde_ve.to(_lowerCamelCase )
sde_ve.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=_lowerCamelCase ).images
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def twos_complement( number: int ) -> str:
    if number > 0:
        raise ValueError('''input must be a negative integer''' )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            '''1'''
            + '''0''' * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else '''0'''
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
__snake_case = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
__snake_case = {
'''allenai/longformer-base-4096''': 4_0_9_6,
'''allenai/longformer-large-4096''': 4_0_9_6,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4_0_9_6,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4_0_9_6,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode() -> dict:
    """simple docstring"""
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
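# The table above covers all 256 byte values: printable, non-space bytes map to
# their own characters, and the remaining bytes are remapped to code points >= 256,
# so every byte gets a visible unicode stand-in that byte-level BPE can merge and
# later invert via self.byte_decoder.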
def get_pairs( word ) -> set:
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class __lowerCamelCase (_a ):
_lowercase = VOCAB_FILES_NAMES
_lowercase = PRETRAINED_VOCAB_FILES_MAP
_lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase = ["""input_ids""", """attention_mask"""]
def __init__( self: str,A_: List[Any],A_: List[str],A_: List[Any]="replace",A_: Optional[int]="<s>",A_: Union[str, Any]="</s>",A_: List[Any]="</s>",A_: int="<s>",A_: List[Any]="<unk>",A_: List[str]="<pad>",A_: str="<mask>",A_: Dict=False,**A_: Tuple,):
'''simple docstring'''
__UpperCamelCase = AddedToken(A_,lstrip=A_,rstrip=A_ ) if isinstance(A_,A_ ) else bos_token
__UpperCamelCase = AddedToken(A_,lstrip=A_,rstrip=A_ ) if isinstance(A_,A_ ) else eos_token
__UpperCamelCase = AddedToken(A_,lstrip=A_,rstrip=A_ ) if isinstance(A_,A_ ) else sep_token
__UpperCamelCase = AddedToken(A_,lstrip=A_,rstrip=A_ ) if isinstance(A_,A_ ) else cls_token
__UpperCamelCase = AddedToken(A_,lstrip=A_,rstrip=A_ ) if isinstance(A_,A_ ) else unk_token
__UpperCamelCase = AddedToken(A_,lstrip=A_,rstrip=A_ ) if isinstance(A_,A_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__UpperCamelCase = AddedToken(A_,lstrip=A_,rstrip=A_ ) if isinstance(A_,A_ ) else mask_token
super().__init__(
errors=A_,bos_token=A_,eos_token=A_,unk_token=A_,sep_token=A_,cls_token=A_,pad_token=A_,mask_token=A_,add_prefix_space=A_,**A_,)
with open(A_,encoding='utf-8' ) as vocab_handle:
__UpperCamelCase = json.load(A_ )
__UpperCamelCase = {v: k for k, v in self.encoder.items()}
__UpperCamelCase = errors # how to handle errors in decoding
__UpperCamelCase = bytes_to_unicode()
__UpperCamelCase = {v: k for k, v in self.byte_encoder.items()}
with open(A_,encoding='utf-8' ) as merges_handle:
__UpperCamelCase = merges_handle.read().split('\n' )[1:-1]
__UpperCamelCase = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) )
__UpperCamelCase = {}
__UpperCamelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCamelCase = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def snake_case_ ( self: Tuple ):
'''simple docstring'''
return len(self.encoder )
def snake_case_ ( self: Any ):
'''simple docstring'''
return dict(self.encoder,**self.added_tokens_encoder )
def snake_case_ ( self: Dict,A_: List[Any] ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__UpperCamelCase = tuple(A_ )
__UpperCamelCase = get_pairs(A_ )
if not pairs:
return token
while True:
__UpperCamelCase = min(A_,key=lambda A_ : self.bpe_ranks.get(A_,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCamelCase, __UpperCamelCase = bigram
__UpperCamelCase = []
__UpperCamelCase = 0
while i < len(A_ ):
try:
__UpperCamelCase = word.index(A_,A_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCamelCase = j
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCamelCase = tuple(A_ )
__UpperCamelCase = new_word
if len(A_ ) == 1:
break
else:
__UpperCamelCase = get_pairs(A_ )
__UpperCamelCase = ' '.join(A_ )
__UpperCamelCase = word
return word
def snake_case_ ( self: Tuple,A_: List[Any] ):
'''simple docstring'''
__UpperCamelCase = []
for token in re.findall(self.pat,A_ ):
__UpperCamelCase = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_ ).split(' ' ) )
return bpe_tokens
def snake_case_ ( self: Dict,A_: Union[str, Any] ):
'''simple docstring'''
return self.encoder.get(A_,self.encoder.get(self.unk_token ) )
def snake_case_ ( self: Union[str, Any],A_: int ):
'''simple docstring'''
return self.decoder.get(A_ )
def snake_case_ ( self: Tuple,A_: List[str] ):
'''simple docstring'''
__UpperCamelCase = ''.join(A_ )
__UpperCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8',errors=self.errors )
return text
    def save_vocabulary( self,save_directory: str,filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file = os.path.join(
            save_directory,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file,'w',encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder,indent=2,sort_keys=True,ensure_ascii=False ) + '\n' )
        index = 0
        with open(merge_file,'w',encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(),key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return vocab_file, merge_file
def snake_case_ ( self: Union[str, Any],A_: List[int],A_: Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
__UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case_ ( self: str,A_: List[int],A_: Optional[List[int]] = None,A_: bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_,token_ids_a=A_,already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def snake_case_ ( self: Optional[Any],A_: List[int],A_: Optional[List[int]] = None ):
'''simple docstring'''
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case_ ( self: List[str],A_: Optional[int],A_: Optional[int]=False,**A_: int ):
'''simple docstring'''
__UpperCamelCase = kwargs.pop('add_prefix_space',self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(A_ ) > 0 and not text[0].isspace()):
__UpperCamelCase = ' ' + text
return (text, kwargs)
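    # Like RoBERTa's byte-level BPE, tokenization here is sensitive to a leading
    # space ("hello" and " hello" encode differently), which is why
    # add_prefix_space is threaded through before the pre-tokenization regex runs.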
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle( data ) -> list[Any]:
    '''simple docstring'''
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a] , data[b] = data[b] , data[a]
    return data
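def fisher_yates_shuffle_classic( data ) -> list[Any]:
    # Minimal sketch added for contrast (not in the original file): the textbook
    # Fisher-Yates walk, which guarantees an unbiased permutation, whereas the
    # two-random-index variant above does not.
    for i in range(len(data ) - 1 , 0 , -1 ):
        j = random.randint(0 , i )
        data[i] , data[j] = data[j] , data[i]
    return data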
if __name__ == "__main__":
lowerCamelCase_ = [0, 1, 2, 3, 4, 5, 6, 7]
lowerCamelCase_ = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
"""simple docstring"""
def xnor_gate( input_1: int , input_2: int ) -> int:
    return 1 if input_1 == input_2 else 0
def test_xnor_gate() -> None:
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module( module ) -> bool:
    if is_torch_version('''<''' , '''2.0.0''' ) or not hasattr(torch , '''_dynamo''' ):
        return False
    return isinstance(module , torch._dynamo.eval_frame.OptimizedModule )
def extract_model_from_parallel( model , keep_fp32_wrapper: bool = True ):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model )
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model , options ):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model , '''forward''' )
        original_forward = model.__dict__.pop('''_original_forward''' , None )
        if original_forward is not None:
            while hasattr(forward , '''__wrapped__''' ):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model , '''_converted_to_transformer_engine''' , False ):
        convert_model(model , to_transformer_engine=False )
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone() -> None:
    PartialState().wait_for_everyone()
def save( obj , f ) -> None:
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f )
    elif PartialState().local_process_index == 0:
        torch.save(obj , f )
@contextmanager
def patch_environment( **kwargs ):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name( obj ) -> str:
    if not hasattr(obj , '''__qualname__''' ) and not hasattr(obj , '''__name__''' ):
        obj = getattr(obj , '''__class__''' , obj )
    if hasattr(obj , '''__qualname__''' ):
        return obj.__qualname__
    if hasattr(obj , '''__name__''' ):
        return obj.__name__
    return str(obj )
def merge_dicts( source , destination ) -> dict:
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value
    return destination
def is_port_in_use( port: int = None ) -> bool:
    if port is None:
        port = 2_95_00
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        return s.connect_ex(('''localhost''', port) ) == 0
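# Hedged usage sketch: the helper above defaults to 29500, Accelerate's standard
# launch port; e.g. is_port_in_use(8080) returns True only if something is already
# listening on localhost:8080.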
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase =logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
UpperCAmelCase =[]
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", f"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", f"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", f"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", f"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.weight""", f"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", f"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", f"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", f"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.weight""", f"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", f"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", f"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", f"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", f"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", f"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.bias""", f"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", f"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", f"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", f"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.bias""", f"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", f"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
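

# A tiny illustrative check (not part of the original script) of the two helpers
# above on a toy state dict; the key names here are made up for the example:
def _rename_helpers_example():
    from collections import OrderedDict

    import torch

    sd = OrderedDict({"backbone.0.body.stem.weight": torch.zeros(1), "old_name": torch.ones(1)})
    rename_key(sd, "old_name", "new_name")
    sd = rename_backbone_keys(sd)
    assert "new_name" in sd and "backbone.conv_encoder.model.stem.weight" in sd
    return sd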
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
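

# A minimal, self-contained sketch of the fused-projection split performed above:
# PyTorch's nn.MultiheadAttention stores Q, K and V stacked row-wise in a single
# (3*d, d) matrix, so slicing the rows recovers the three projections. d = 256
# mirrors conditional DETR's hidden size; the random tensor is illustrative only.
def _split_in_proj_example():
    import torch

    d = 256
    in_proj_weight = torch.randn(3 * d, d)
    q_w = in_proj_weight[:d, :]
    k_w = in_proj_weight[d : 2 * d, :]
    v_w = in_proj_weight[-d:, :]
    assert q_w.shape == k_w.shape == v_w.shape == (d, d)
    return q_w, k_w, v_w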
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights to our conditional DETR structure.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # strip the "conditional_detr" prefix and re-nest the key under the base model
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
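
# Example invocation (illustrative only; the script filename is assumed, and the
# original checkpoint is fetched from torch hub, so network access is required):
#
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50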
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only"""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
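
    # A small illustrative check (not part of the test suite): at initialization the
    # adapter's second matrix is zero, so LoRALayer leaves the wrapped module's
    # output unchanged. All shapes below are arbitrary.
    def _lora_layer_example():
        base = nn.Linear(8, 8)
        wrapped = LoRALayer(base, rank=4)
        x = torch.randn(2, 8)
        assert torch.allclose(wrapped(x), base(x))
        return wrapped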
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        r"""
        TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to
        avoid unexpected behaviors.
        """
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        r"""
        A simple test to check if the quantization config is correctly serialized and deserialized
        """
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        r"""
        A simple test to check if the model conversion has been done correctly by checking on the
        memory footprint of the converted model and the class type of the linear layers of the converted models
        """
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        r"""
        A simple test to check that the linear layers (other than those kept in fp32) were converted to 4-bit
        """
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        r"""
        Test the generation quality of the quantized model and see that we are matching the expected output.
        """
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        r"""
        Test that loading the model through a `BitsAndBytesConfig` is equivalent
        """
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        r"""
        Test that trying to save a model after converting it to 4-bit raises an error
        """
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        r"""
        Test that passing both a quantization config and `load_in_4bit` raises an error
        """
        bnb_config = BitsAndBytesConfig()

        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=bnb_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        r"""
        Test that trying to cast (or assign a device to) a model after 4-bit conversion raises an error,
        and that the fp16 model can still be moved and cast freely.
        """
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries casting to float
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries casting to half
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        r"""
        Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly.
        """
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
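

# A minimal standalone sketch (not part of the test suite) of requesting 4-bit
# weights through a quantization config; "facebook/opt-350m" is only an
# illustrative small checkpoint, and a CUDA GPU with `bitsandbytes` installed
# is assumed:
def _load_4bit_example():
    quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
    model = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-350m", quantization_config=quantization_config, device_map="auto"
    )
    return model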
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        r"""
        Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly.
        """
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        r"""
        Test that inference works while the `_keep_in_fp32_modules` are kept in fp32.
        """
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        r"""
        A simple test to check that the head of some models (e.g. CausalLM or SequenceClassification)
        stays a plain `nn.Parameter` while the base model weights are quantized.
        """
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        r"""
        Verify that the 4-bit loading attributes are correctly passed through the pipeline API.
        """
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        r"""
        This tests that the model has been loaded and can be used correctly on a multi-GPU setup.
        Let's just try to load a model on 2 GPUs and see if it works.
        """
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # only keep leaf operations (no submodules), plus convolutions and batch norms
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
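

# A minimal illustrative use of the tracing-based transfer above (not part of the
# conversion): weights are copied between two identically-shaped modules by
# replaying a forward pass. The toy shapes are arbitrary.
def _module_transfer_example():
    src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    dest = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    ModuleTransfer(src=src, dest=dest)(torch.randn(1, 3, 16, 16))
    assert torch.allclose(src[0].weight, dest[0].weight)
    return dest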
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        # bind config in this branch too, so the return statement below is always valid
        config = names_to_config[model_name]
        convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
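
# Example invocation (illustrative only; the script filename is assumed). Note
# that `--push_to_hub` uses `type=bool`, and argparse treats any non-empty string
# as True, so the default can effectively only be disabled by passing an empty
# string (`--push_to_hub ""`):
#
#   python convert_resnet_to_pytorch.py \
#       --model_name resnet50 --pytorch_dump_folder_path ./converted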
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """
    Finds where `function` becomes 0 in [a, b] using bisection (Bolzano's theorem).
    """
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
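

# A quick illustrative check of the routine above on a function with a known
# closed-form root: x**2 - 4 changes sign on [0, 1000], so bisection should
# converge to 2 (to within the 1e-7 stopping tolerance).
def _bisection_example() -> float:
    root = bisection(lambda x: x**2 - 4, 0, 1000)
    assert abs(root - 2) < 1e-6
    return root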
if __name__ == "__main__":
    print(bisection(f, 1, 1000))
import doctest
doctest.testmod()