import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

EN_CODE = 50003
PYTHON_CODE = 50002


@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True),
            code,
        )

    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]
        )

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True),
            code,
        )


@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
        134,
        5452,
        33460,
        33441,
        33463,
        33465,
        33463,
        33449,
        988,
        20,
        33456,
        19,
        33456,
        771,
        39,
        4258,
        889,
        3318,
        33441,
        33463,
        33465,
        33463,
        33449,
        2471,
        2,
        PYTHON_CODE,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)

    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(PYTHON_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
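
# A minimal end-to-end sketch (illustrative, not part of the test file; it
# assumes network access to the "uclanlp/plbart-python-en_XX" checkpoint
# exercised above):
#
#   tok = PLBartTokenizer.from_pretrained(
#       "uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX"
#   )
#   ids = tok("def f(a): return a").input_ids
#   # source sequences end with [eos (2), __python__ (50002)], mirroring the
#   # suffix_tokens assertion in test_python_en_tokenizer_prepare_batch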
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest

from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError

from .utils import require_pil


class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            arr = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])


def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_batch(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_table(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_row(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)


def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value


@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype


@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    # in range
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()


@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed


def test_arrow_writer_with_filesystem(mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)


def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}


@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None


def test_always_nullable():
    # ArrowWriter supports nullable fields
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])

    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)

    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
def greatest_common_divisor(x: int, y: int) -> int:
    """Compute the greatest common divisor using Euclid's algorithm."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Compute the least common multiple of x and y."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Return the smallest positive number that is evenly divisible by all
    of the numbers from 1 to n (Project Euler problem 5)."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
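
# Worked example: solution(10) folds lcm over 1..10, producing the running
# values 1, 2, 6, 12, 60, 60, 420, 840, 2520, 2520; the smallest number
# evenly divisible by every integer from 1 to 10 is therefore 2520.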
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """Return True if the partition for `positive_integer` is perfect, i.e.
    log2(sqrt(4 * n + 1) / 2 + 1 / 2) is an integer."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Return the first partition value for which the proportion of perfect
    partitions drops below `max_proportion` (Project Euler problem 207)."""
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
A = TaConfig.from_json_file(UpperCamelCase__ )
print(f'Building PyTorch model from configuration: {config}' )
A = TaForConditionalGeneration(UpperCamelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_ta(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCamelCase : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
"""simple docstring"""
def __snake_case ( UpperCamelCase__ ) -> list[int]:
"""simple docstring"""
A = [0 for i in range(len(UpperCamelCase__ ) )]
# initialize interval's left pointer and right pointer
A , A = 0, 0
for i in range(1 , len(UpperCamelCase__ ) ):
# case when current index is inside the interval
if i <= right_pointer:
A = min(right_pointer - i + 1 , z_result[i - left_pointer] )
A = min_edge
while go_next(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
A , A = i, i + z_result[i] - 1
return z_result
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> bool:
"""simple docstring"""
return i + z_result[i] < len(UpperCamelCase__ ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
"""simple docstring"""
A = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
A = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(UpperCamelCase__ ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
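
# A quick sanity check (an illustrative addition, not part of the original
# module): "abr" occurs twice in "abracadabra", at indices 0 and 7.
if __name__ == "__main__":
    assert find_pattern("abr", "abracadabra") == 2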
import collections
import gzip
import os
import urllib.request

import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated


_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels


class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]


@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
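
# Classic usage sketch for this (deprecated) API; the path is illustrative:
#
#   mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
#   images, labels = mnist.train.next_batch(100)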
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Iterate through each branch of the state space tree using DFS,
    printing a permutation whenever the end of the sequence is reached."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
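
# The driver above prints one permutation per line; an n-element sequence
# yields n! lines (24 for [3, 1, 2, 4] and 6 for ["A", "B", "C"]).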
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__magic_name__ : Dict = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowercase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _A , _A=7 , _A=3 , _A=1_8 , _A=3_0 , _A=4_0_0 , _A=None , _A=True , _A=True , _A=None , ):
'''simple docstring'''
UpperCamelCase : Any = size if size is not None else {"""height""": 2_0, """width""": 2_0}
UpperCamelCase : Tuple = parent
UpperCamelCase : str = batch_size
UpperCamelCase : str = num_channels
UpperCamelCase : List[Any] = image_size
UpperCamelCase : Any = min_resolution
UpperCamelCase : Any = max_resolution
UpperCamelCase : int = size
UpperCamelCase : Dict = do_normalize
UpperCamelCase : Optional[int] = do_convert_rgb
UpperCamelCase : Any = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
UpperCamelCase : List[str] = patch_size if patch_size is not None else {"""height""": 1_6, """width""": 1_6}
def _a ( self ):
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def _a ( self ):
'''simple docstring'''
UpperCamelCase : List[str] = """https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"""
UpperCamelCase : List[Any] = Image.open(requests.get(_A , stream=_A ).raw ).convert("""RGB""" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class lowercase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = PixaStructImageProcessor if is_vision_available() else None
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = PixaStructImageProcessingTester(self )
@property
def _a ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , """do_normalize""" ) )
self.assertTrue(hasattr(_A , """do_convert_rgb""" ) )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.image_processor_tester.prepare_dummy_image()
UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
UpperCamelCase : List[Any] = 2_0_4_8
UpperCamelCase : str = image_processor(_A , return_tensors="""pt""" , max_patches=_A )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.06_06 ) , atol=1e-3 , rtol=1e-3 ) )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCamelCase : Optional[int] = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase : Optional[Any] = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase : Optional[Any] = image_processor(
_A , return_tensors="""pt""" , max_patches=_A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCamelCase : int = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
UpperCamelCase : Tuple = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_A ):
UpperCamelCase : Optional[int] = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_A ).flattened_patches
UpperCamelCase : Dict = """Hello"""
UpperCamelCase : Optional[Any] = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_A , header_text=_A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase : str = image_processor(
_A , return_tensors="""pt""" , max_patches=_A , header_text=_A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
UpperCamelCase : Optional[Any] = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase : Tuple = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase : int = image_processor(
_A , return_tensors="""pt""" , max_patches=_A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCamelCase : List[str] = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase : List[str] = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase : List[Any] = image_processor(
_A , return_tensors="""pt""" , max_patches=_A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class lowercase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase : Optional[int] = PixaStructImageProcessor if is_vision_available() else None
def _a ( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = PixaStructImageProcessingTester(self , num_channels=4 )
UpperCamelCase : Optional[int] = 3
@property
def _a ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , """do_normalize""" ) )
self.assertTrue(hasattr(_A , """do_convert_rgb""" ) )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCamelCase : str = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase : Union[str, Any] = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase : Optional[Any] = image_processor(
_A , return_tensors="""pt""" , max_patches=_A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
"""Image/Text processor class for OwlViT."""

import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
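
# --- usage sketch (added) ---------------------------------------------------
# Hedged example of the reader above; assumes a local pyspark install and
# mirrors the public `datasets` Spark integration. The cache_dir is arbitrary.
def _spark_reader_sketch():
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[*]").appName("demo").getOrCreate()
    df = spark.createDataFrame([("hello",), ("world",)], ["text"])
    # streaming=False materializes the dataframe through the Spark builder
    ds = SparkDatasetReader(df, streaming=False, cache_dir="/tmp/spark_ds_cache").read()
    print(ds)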
| 718
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_embeds, zero_image_embeds = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_embeds,
            negative_image_embeds=zero_image_embeds,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
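
# --- worked example (added) --------------------------------------------------
# assert_mean_pixel_difference (imported above) checks images by mean absolute
# pixel error; a self-contained sketch of the same idea, with an assumed
# threshold of 10 on the 0-255 scale:
def _mean_pixel_difference_sketch():
    a = np.zeros((8, 8, 3), dtype=np.float32)
    b = a + 5.0 / 255  # shift every pixel by 5/255
    diff = np.abs(a - b).mean() * 255
    assert diff < 10, f"mean pixel difference too large: {diff}"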
| 408
| 0
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
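
# --- usage sketch (added) ---------------------------------------------------
# Hedged example of the extractor above on one second of synthetic audio;
# all constructor values are the defaults from the signature.
def _feature_extractor_sketch():
    extractor = TvltFeatureExtractor()
    audio = np.random.randn(44100).astype(np.float32)  # 1 s at 44.1 kHz
    batch = extractor(audio, sampling_rate=44100, return_tensors="np", return_attention_mask=True)
    print(batch["audio_values"].shape, batch["audio_mask"].shape)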
| 414
|
"""simple docstring"""
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.15},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
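
# --- worked example (added) --------------------------------------------------
# Quick sanity check of rewrite_dict_keys: '@@' continuation markers are
# stripped, word-final tokens gain '</w>', and the four special tokens are
# restored to their bare form.
def _rewrite_dict_keys_example():
    demo = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7}
    assert rewrite_dict_keys(demo) == {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le": 5, "er</w>": 7}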
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
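
# --- worked example (added) --------------------------------------------------
# The merges-file cleanup above strips trailing BPE merge frequencies with a
# multiline regex; a tiny self-contained check:
def _merges_regex_example():
    raw = "l o 42\nlo w 17"
    assert re.sub(r" \d+$", "", raw, 0, re.M) == "l o\nlo w"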
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 706
|
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
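
# --- worked example (added) --------------------------------------------------
# A self-contained greedy fractional-knapsack sketch that reproduces the 210
# expected by test_sorted above; the real `knapsack.greedy_knapsack` module
# may differ in details (this is an illustration, not its implementation).
def _greedy_knapsack_sketch(profit, weight, max_weight):
    by_ratio = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in by_ratio:
        take = min(w, capacity)
        total += p * (take / w)
        capacity -= take
        if capacity == 0:
            break
    return total


assert _greedy_knapsack_sketch([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210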
| 137
| 0
|
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes up to and including num."""
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False

        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
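
# --- worked example (added) --------------------------------------------------
# prime_sieve(10): floor(sqrt(10)) == 3, so 2 and 3 seed the sieve and mark
# 4, 6, 8, 10 and 9 composite; 5 and 7 survive the final sweep.
assert prime_sieve(10) == [2, 3, 5, 7]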
| 628
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
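
# --- worked example (added) --------------------------------------------------
# The qkv branch above slices a fused projection into query/key/value along
# the first axis; the same slicing on a small synthetic weight:
def _qkv_split_example():
    dim = 4
    qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    query = qkv_weight[:dim, :]
    key = qkv_weight[dim : dim * 2, :]
    value = qkv_weight[-dim:, :]
    assert torch.equal(torch.cat([query, key, value]), qkv_weight)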
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
| 628
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
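
# --- usage sketch (added) ---------------------------------------------------
# attribute_map lets generic code read decoder hyperparameters through the
# common names; a quick check against the defaults above:
def _trocr_config_example():
    config = TrOCRConfig()
    assert config.hidden_size == config.d_model == 1024  # mapped via attribute_map
    assert config.num_hidden_layers == config.decoder_layers == 12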
| 702
|
"""simple docstring"""
def least_divisible_repunit(divisor: int) -> int:
    # Return the least k such that the repunit R(k) = 111...1 (k ones) is
    # divisible by divisor, or 0 if no repunit can be (even/multiple-of-5 divisors).
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    # Search odd divisors for the first one whose least divisible repunit
    # exceeds limit (Project Euler 129).
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f"""{solution() = }""")
| 545
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
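
# --- illustration (added) -----------------------------------------------------
# A minimal sketch of the lazy-import pattern used above: attribute access
# triggers the real import. This is a simplified stand-in, not the actual
# transformers._LazyModule implementation.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for module_name, names in self._import_structure.items():
            if attr in names:
                # the import happens only now, on first access
                return getattr(importlib.import_module(module_name), attr)
        raise AttributeError(attr)


assert _LazyModuleSketch("demo", {"math": ["sqrt"]}).sqrt(9) == 3.0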
| 628
|
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        pass
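
# --- note (added) -------------------------------------------------------------
# The pieces checked in test_full_tokenizer come from greedy WordPiece over the
# toy vocab in setUp: "unwanted" -> "un" + "##want" + "##ed", and the ids
# [7, 4, 5, 10, 8, 9] are simply the positions of those pieces in vocab_tokens.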
| 628
| 1
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 3 , UpperCAmelCase = 1 , UpperCAmelCase = 1 , UpperCAmelCase = "relu" , ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase =nn.Convad(
UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=UpperCAmelCase__ , stride=UpperCAmelCase__ , padding=kernel_size // 2 , groups=UpperCAmelCase__ , bias=UpperCAmelCase__ , )
__UpperCAmelCase =nn.BatchNormad(UpperCAmelCase__)
__UpperCAmelCase =ACTaFN[activation] if activation is not None else nn.Identity()
def A__ (self , UpperCAmelCase):
'''simple docstring'''
__UpperCAmelCase =self.convolution(UpperCAmelCase__)
__UpperCAmelCase =self.normalization(UpperCAmelCase__)
__UpperCAmelCase =self.activation(UpperCAmelCase__)
return hidden_state
class RegNetEmbeddings(nn.Module):
def __init__(self , UpperCAmelCase):
'''simple docstring'''
super().__init__()
__UpperCAmelCase =RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act)
__UpperCAmelCase =config.num_channels
def A__ (self , UpperCAmelCase):
'''simple docstring'''
__UpperCAmelCase =pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''')
__UpperCAmelCase =self.embedder(UpperCAmelCase__)
return hidden_state
class RegNetShortCut(nn.Module):
def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 2):
'''simple docstring'''
super().__init__()
__UpperCAmelCase =nn.Convad(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , stride=UpperCAmelCase__ , bias=UpperCAmelCase__)
__UpperCAmelCase =nn.BatchNormad(UpperCAmelCase__)
def A__ (self , UpperCAmelCase):
'''simple docstring'''
__UpperCAmelCase =self.convolution(UpperCAmelCase__)
__UpperCAmelCase =self.normalization(UpperCAmelCase__)
return hidden_state
class RegNetSELayer(nn.Module):
def __init__(self , UpperCAmelCase , UpperCAmelCase):
'''simple docstring'''
super().__init__()
__UpperCAmelCase =nn.AdaptiveAvgPoolad((1, 1))
__UpperCAmelCase =nn.Sequential(
nn.Convad(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1) , nn.ReLU() , nn.Convad(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1) , nn.Sigmoid() , )
def A__ (self , UpperCAmelCase):
'''simple docstring'''
__UpperCAmelCase =self.pooler(UpperCAmelCase__)
__UpperCAmelCase =self.attention(UpperCAmelCase__)
__UpperCAmelCase =hidden_state * attention
return hidden_state
class RegNetXLayer(nn.Module):
def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1):
'''simple docstring'''
super().__init__()
__UpperCAmelCase =in_channels != out_channels or stride != 1
__UpperCAmelCase =max(1 , out_channels // config.groups_width)
__UpperCAmelCase =(
RegNetShortCut(UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__) if should_apply_shortcut else nn.Identity()
)
__UpperCAmelCase =nn.Sequential(
RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act) , RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__) , )
__UpperCAmelCase =ACTaFN[config.hidden_act]
def A__ (self , UpperCAmelCase):
'''simple docstring'''
__UpperCAmelCase =hidden_state
__UpperCAmelCase =self.layer(UpperCAmelCase__)
__UpperCAmelCase =self.shortcut(UpperCAmelCase__)
hidden_state += residual
__UpperCAmelCase =self.activation(UpperCAmelCase__)
return hidden_state
class RegNetYLayer(nn.Module):
def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1):
'''simple docstring'''
super().__init__()
__UpperCAmelCase =in_channels != out_channels or stride != 1
__UpperCAmelCase =max(1 , out_channels // config.groups_width)
__UpperCAmelCase =(
RegNetShortCut(UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__) if should_apply_shortcut else nn.Identity()
)
__UpperCAmelCase =nn.Sequential(
RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act) , RegNetSELayer(UpperCAmelCase__ , reduced_channels=int(round(in_channels / 4))) , RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__) , )
__UpperCAmelCase =ACTaFN[config.hidden_act]
def A__ (self , UpperCAmelCase):
'''simple docstring'''
__UpperCAmelCase =hidden_state
__UpperCAmelCase =self.layer(UpperCAmelCase__)
__UpperCAmelCase =self.shortcut(UpperCAmelCase__)
hidden_state += residual
__UpperCAmelCase =self.activation(UpperCAmelCase__)
return hidden_state
class RegNetStage(nn.Module):
def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 2 , UpperCAmelCase = 2 , ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase =RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
__UpperCAmelCase =nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , ) , *[layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) for _ in range(depth - 1)] , )
def A__ (self , UpperCAmelCase):
'''simple docstring'''
__UpperCAmelCase =self.layers(UpperCAmelCase__)
return hidden_state
class RegNetEncoder(nn.Module):
def __init__(self , UpperCAmelCase):
'''simple docstring'''
super().__init__()
__UpperCAmelCase =nn.ModuleList([])
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
UpperCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ))
__UpperCAmelCase =zip(config.hidden_sizes , config.hidden_sizes[1:])
for (in_channels, out_channels), depth in zip(UpperCAmelCase__ , config.depths[1:]):
self.stages.append(RegNetStage(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , depth=UpperCAmelCase__))
def A__ (self , UpperCAmelCase , UpperCAmelCase = False , UpperCAmelCase = True):
'''simple docstring'''
__UpperCAmelCase =() if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__UpperCAmelCase =hidden_states + (hidden_state,)
__UpperCAmelCase =stage_module(UpperCAmelCase__)
if output_hidden_states:
__UpperCAmelCase =hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(last_hidden_state=UpperCAmelCase__ , hidden_states=UpperCAmelCase__)
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
def A__ (self , UpperCAmelCase):
'''simple docstring'''
if isinstance(UpperCAmelCase__ , nn.Convad):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''')
elif isinstance(UpperCAmelCase__ , (nn.BatchNormad, nn.GroupNorm)):
nn.init.constant_(module.weight , 1)
nn.init.constant_(module.bias , 0)
def A__ (self , UpperCAmelCase , UpperCAmelCase=False):
'''simple docstring'''
if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
__UpperCAmelCase =value
REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
def __init__(self , UpperCAmelCase):
'''simple docstring'''
super().__init__(UpperCAmelCase__)
__UpperCAmelCase =config
__UpperCAmelCase =RegNetEmbeddings(UpperCAmelCase__)
__UpperCAmelCase =RegNetEncoder(UpperCAmelCase__)
__UpperCAmelCase =nn.AdaptiveAvgPoolad((1, 1))
# Initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
def A__ (self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None):
'''simple docstring'''
__UpperCAmelCase =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCAmelCase =return_dict if return_dict is not None else self.config.use_return_dict
__UpperCAmelCase =self.embedder(UpperCAmelCase__)
__UpperCAmelCase =self.encoder(
UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__)
__UpperCAmelCase =encoder_outputs[0]
__UpperCAmelCase =self.pooler(UpperCAmelCase__)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCAmelCase__ , pooler_output=UpperCAmelCase__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
def __init__(self , UpperCAmelCase):
'''simple docstring'''
super().__init__(UpperCAmelCase__)
__UpperCAmelCase =config.num_labels
__UpperCAmelCase =RegNetModel(UpperCAmelCase__)
# classification head
__UpperCAmelCase =nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
def A__ (self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , ):
'''simple docstring'''
__UpperCAmelCase =return_dict if return_dict is not None else self.config.use_return_dict
__UpperCAmelCase =self.regnet(UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__)
__UpperCAmelCase =outputs.pooler_output if return_dict else outputs[1]
__UpperCAmelCase =self.classifier(UpperCAmelCase__)
__UpperCAmelCase =None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__UpperCAmelCase ='''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__UpperCAmelCase ='''single_label_classification'''
else:
__UpperCAmelCase ='''multi_label_classification'''
if self.config.problem_type == "regression":
__UpperCAmelCase =MSELoss()
if self.num_labels == 1:
__UpperCAmelCase =loss_fct(logits.squeeze() , labels.squeeze())
else:
__UpperCAmelCase =loss_fct(UpperCAmelCase__ , UpperCAmelCase__)
elif self.config.problem_type == "single_label_classification":
__UpperCAmelCase =CrossEntropyLoss()
__UpperCAmelCase =loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
__UpperCAmelCase =BCEWithLogitsLoss()
__UpperCAmelCase =loss_fct(UpperCAmelCase__ , UpperCAmelCase__)
if not return_dict:
__UpperCAmelCase =(logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=UpperCAmelCase__ , logits=UpperCAmelCase__ , hidden_states=outputs.hidden_states)
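
# --- worked example (added) ---------------------------------------------------
# The SE layer above is standard squeeze-and-excitation; a self-contained
# sketch of the pooling + gating math it performs (illustration only):
def _squeeze_excitation_sketch():
    x = torch.randn(2, 8, 7, 7)
    pooled = nn.AdaptiveAvgPool2d((1, 1))(x)  # (2, 8, 1, 1): the "squeeze"
    gate = nn.Sequential(
        nn.Conv2d(8, 2, kernel_size=1),
        nn.ReLU(),
        nn.Conv2d(2, 8, kernel_size=1),
        nn.Sigmoid(),
    )(pooled)  # per-channel weights in (0, 1)
    out = x * gate  # the "excitation"
    assert out.shape == x.shape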
| 718
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_albert'] = ['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_albert_fast'] = ['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_albert'] = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_albert'] = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_albert'] = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
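
# With this pattern, `from transformers.models.albert import AlbertModel` resolves
# lazily: the heavy torch/TF/flax modules listed in `_import_structure` are only
# imported on first attribute access, and the try/except blocks above merely decide
# which names the lazy module is allowed to expose.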
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass


def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
# these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]])  # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
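

# The batch built above holds 2 sequences of 25 tokens each, which is why the
# integration tests below expect logits shaped like (2, 25) or (2, 25, num_labels).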

@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
'''simple docstring'''
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the already-sorted runs input_list[low:mid] and input_list[mid:high + 1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list
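

# Worked example (both halves must already be sorted): for input_list = [0, 2, 1, 3],
# merge(input_list, 0, 2, 3) interleaves the runs [0, 2] and [1, 3] and rewrites the
# slice in place, leaving [0, 1, 2, 3].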


def iter_merge_sort(input_list: list) -> list:
    """Bottom-up (iterative) merge sort on a copy of the input list."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1
    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel


@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)


def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
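

# Shape sketch (a note, not from the original file): given (batch, seq_len) id
# tensors, the default attention masks above are just `ids != config.pad_token_id`
# cast to int8, and the head masks default to all-ones, i.e. no heads are masked.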


@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
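

# Hedged usage sketch, not part of the original module; BERT configs are an
# arbitrary choice here:
#
#   from transformers import BertConfig, EncoderDecoderConfig
#
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention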
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"Salesforce/codegen-350M-mono": 2048,
}


class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005. "
                "so that the fast tokenizer works correctly."
            )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids,
        skip_special_tokens=False,
        clean_up_tokenization_spaces=None,
        truncate_before_pattern=None,
        **kwargs,
    ):
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text
    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num!."""
    return sum(int(x) for x in str(factorial(num)))
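

# Worked check: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) == 27.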
if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
"input_ids": [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
def optimal_merge_pattern(files: list) -> float:
    """Return the minimum total cost of merging all files, two at a time."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
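

# Worked example: for files = [2, 3, 4] the greedy strategy first merges 2 and 3
# (cost 5), then 5 and 4 (cost 9), so optimal_merge_pattern([2, 3, 4]) returns 14.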
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA')
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
    def test_save_load_local(self):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 70
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    aa = np.sum(np.square(a), axis=1)
    ba = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
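# Illustrative usage sketch (not part of the original module): map each RGB
# pixel to the index of its nearest cluster. The `palette` below is a made-up
# two-colour clusters array chosen only for demonstration.
#
#   palette = np.array([[0, 0, 0], [255, 255, 255]])  # black and white
#   pixels = np.array([[10, 10, 10], [250, 240, 245]], dtype=np.float32)
#   color_quantize(pixels, palette)  # -> array([0, 1])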
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 471
| 0
|
def kinetic_energy(mass: float, velocity: float) -> float:
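    """
    Compute the kinetic energy 0.5 * m * v^2 of a body of mass ``mass``
    moving at ``velocity``. Doctests added for illustration:
    >>> kinetic_energy(10, 10)
    500.0
    >>> kinetic_energy(0, 10)
    0.0
    >>> kinetic_energy(2, -10)
    100.0
    """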
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 705
|
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim)
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)
    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1)
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
return text_encoder_hidden_states, additive_clip_time_embeddings
| 655
| 0
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu')
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='relu'))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='relu'))
classifier.add(layers.Dense(units=1, activation='sigmoid'))
# Compiling the CNN
classifier.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )
    test_set = test_datagen.flow_from_directory(
        'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )
    # fit accepts generators directly; fit_generator is deprecated in TF2
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
classifier.save('cnn.h5')
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        'dataset/single_prediction/image.png', target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    # sigmoid output: threshold at 0.5 rather than comparing to exact 0/1
    if result[0][0] >= 0.5:
        prediction = 'Abnormality detected'
    else:
        prediction = 'Normal'
| 248
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]
    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
@torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)
            result.append(result_img)
        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts
@torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
| 248
| 1
|
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
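    """
    Return the maximum sum of non-adjacent elements of ``nums``.
    Doctests added for illustration:
    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    >>> maximum_non_adjacent_sum([])
    0
    """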
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 236
|
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
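    """
    Greedy approximation of minimum vertex cover: repeatedly pick the vertex
    with the highest remaining degree. Doctest added for illustration:
    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """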
    queue: list[list] = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
| 236
| 1
|
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level():
    """
    If TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the new default level.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }")
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split(".")[0]
def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    return log_levels
def get_logger(name: Optional[str] = None) -> logging.Logger:
    """
    Return a logger with the specified name. If name is None, the library root logger is returned.
    """
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)
def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    """Set the verbosity to the INFO level."""
    return set_verbosity(INFO)
def set_verbosity_warning():
    """Set the verbosity to the WARNING level."""
    return set_verbosity(WARNING)
def set_verbosity_debug():
    """Set the verbosity to the DEBUG level."""
    return set_verbosity(DEBUG)
def set_verbosity_error():
    """Set the verbosity to the ERROR level."""
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    """Disable the default handler of the library's root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)
def enable_default_handler() -> None:
    """Enable the default handler of the library's root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)
def add_handler(handler: logging.Handler) -> None:
    """Add a handler to the library's root logger."""
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)
def remove_handler(handler: logging.Handler) -> None:
    """Remove a handler from the library's root logger (it must be attached)."""
    _configure_library_root_logger()
    assert handler is not None and handler in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
    """Disable propagation of the library log outputs (disabled by default)."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format() -> None:
    """Enable explicit formatting for every library logger."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)
def reset_format() -> None:
    """Reset the formatting for the library's loggers."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    """
    Like logger.warning(), but emits nothing if the TRANSFORMERS_NO_ADVISORY_WARNINGS env var is set.
    """
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """
    Like logger.warning(), but only emits the warning once per unique call.
    """
    self.warning(*args, **kwargs)
logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""
    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
    def __iter__(self):
        return iter(self._iterator)
    def __getattr__(self, _):
        """Return an empty function."""
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__(self):
        return self
    def __exit__(self, type_, value, traceback):
        return
class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)
    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)
    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)
def enable_progress_bars() -> None:
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bars() -> None:
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 11
|
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003
def rabin_karp(pattern: str, text: str) -> bool:
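    """
    Rabin-Karp substring search: compare a rolling hash of each text window
    against the pattern hash, and only fall back to a direct string comparison
    when the hashes match. Returns True if ``pattern`` occurs in ``text``.
    (Docstring added for clarity; see ``test_rabin_karp`` below for examples.)
    """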
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
print("Success." )
if __name__ == "__main__":
test_rabin_karp()
| 551
| 0
|
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
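    """
    Apply ``iteration_step`` to the vector list ``steps`` times: each pass
    replaces every edge by four shorter edges, growing the Koch curve.
    (Docstring added for clarity; parameter names are reconstructed from use.)
    """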
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect('equal')
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 395
|
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr('datasets.utils.deprecation_utils._emitted_deprecation_warnings', set())
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.metric_id = metric_id
    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]
        def list_metrics(self):
            return self._metrics
    monkeypatch.setattr('datasets.inspect.huggingface_hub', HfhMock())
@pytest.mark.parametrize(
    'func, args', [(load_metric, ('metrics/mse',)), (list_metrics, ()), (inspect_metric, ('metrics/mse', 'tmp_path'))])
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != 'tmp_path' else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match='https://huggingface.co/docs/evaluate'):
        func(*args)
| 395
| 1
|
import inspect
import unittest
from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextV2ModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''')
    def test_model_common_attributes(self):
        pass
    @unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''')
    def test_feed_forward_chunking(self):
        pass
    def test_training(self):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img() -> str:
    """simple image loader used by the integration test"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''') if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''').to(torch_device)
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 32
|
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["""projector.weight"""]
    model.projector.bias.data = downstream_dict["""projector.bias"""]
    model.classifier.weight.data = downstream_dict["""model.post_net.linear.weight"""]
    model.classifier.bias.data = downstream_dict["""model.post_net.linear.bias"""]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["""model.linear.weight"""]
    model.classifier.bias.data = downstream_dict["""model.linear.bias"""]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["""connector.weight"""]
    model.projector.bias.data = downstream_dict["""connector.bias"""]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
    model.feature_extractor.weight.data = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
    model.feature_extractor.bias.data = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
    model.classifier.weight.data = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
    model.classifier.bias.data = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
    model.objective.weight.data = downstream_dict["""objective.W"""]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="""cpu""")
    downstream_dict = checkpoint["""Downstream"""]
    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)
    arch = hf_config.architectures[0]
    if arch.endswith("""ForSequenceClassification"""):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("""ForAudioFrameClassification"""):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("""ForXVector"""):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["""Featurizer"""]["""weights"""]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 587
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D"""), up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D"""), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="""text_time""", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="""scaled_linear""", timestep_spacing="""leading""", )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="""gelu""", projection_dim=32, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""", local_files_only=False)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""", local_files_only=False)
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """text_encoder_2""": text_encoder_2,
            """tokenizer_2""": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 5.0,
            """output_type""": """numpy""",
            """strength""": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["""this is a negative prompt"""]
        inputs["""negative_prompt"""] = negative_prompt
        inputs["""prompt"""] = 3 * [inputs["""prompt"""]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["""this is a negative prompt"""]
        prompt = 3 * [inputs.pop("""prompt""")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="""cpu""", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            """prompt""": """a photograph of an astronaut riding a horse""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs
    def test_stable_diffusion_default_ddim(self):
        pipe = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 566
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
extra_arch = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
    raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = ''' Hello world! cécé herlolip'''
mnli_rename_keys = [
    ('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
    ('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
    ('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
    ('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """_float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="""cpu""")
    hub_interface = torch.hub.load("""pytorch/fairseq""", """bart.large.cnn""").eval()
    hub_interface.model.load_state_dict(sd["""model"""])
    return hub_interface
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("""pytorch/fairseq""", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)
    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(""".""", """-""")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="""pt""").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"""converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}""")
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["""model.shared.weight"""] = state_dict["""model.decoder.embed_tokens.weight"""]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("""mnli""", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["""shared.weight"""] = state_dict["""decoder.embed_tokens.weight"""]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, """lm_head"""):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"""`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}""")
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 566
| 1
|
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
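    """
    returns the list containing all the possible
    combinations a string (target) can be constructed from
    the given list of substrings (word_bank)
    >>> all_construct("hello", ["he", "l", "o"])
    [['he', 'l', 'l', 'o']]
    >>> all_construct("purple", ["purp", "p", "ur", "le", "purpl"])
    [['purp', 'le'], ['p', 'ur', 'p', 'le']]
    """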
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
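# A small case worked by hand (insertion order into the DP table determines output order):
#
#   all_construct("abc", ["a", "b", "c", "ab"]) == [["ab", "c"], ["a", "b", "c"]]
#
# Worst-case complexity is exponential, since every decomposition is materialised.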
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """
    A helper function to set a given tensor (parameter or buffer) of a module on a specific device. Note that doing
    `param.to(device)` creates a new tensor not linked to the parameter, which is why this function is needed.
    """
    # Recurse into dotted tensor names
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse("0.37.2")
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    """
    Recursively replaces eligible `nn.Linear`/`Conv1D` modules with their bitsandbytes equivalents.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(in_features, out_features, module.bias is not None, has_fp16_weights=quantization_config.llm_int8_has_fp16_weight, threshold=quantization_config.llm_int8_threshold)
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(in_features, out_features, module.bias is not None, quantization_config.bnb_4bit_compute_dtype, compress_statistics=quantization_config.bnb_4bit_use_double_quant, quant_type=quantization_config.bnb_4bit_quant_type)
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(module, modules_to_not_convert, current_key_name, quantization_config, has_been_replaced=has_been_replaced)
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """
    Wraps the recursion of `_replace_with_bnb_linear` and warns if no linear module was replaced.
    """
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(model, modules_to_not_convert, current_key_name, quantization_config)

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """
    Returns the module names (typically the tied output head) that should stay in full precision.
    """
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
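# A minimal usage sketch (assumes a CUDA build of bitsandbytes; the model name is
# purely illustrative). `from_pretrained(..., load_in_8bit=True)` routes through the
# helpers above: `get_keys_to_not_convert` keeps the output head in full precision and
# `replace_with_bnb_linear` swaps every other eligible nn.Linear/Conv1D:
#
#   from transformers import AutoModelForCausalLM
#   model = AutoModelForCausalLM.from_pretrained("gpt2", load_in_8bit=True, device_map="auto")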
def binary_exponentiation(a, n, mod):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

print((a / b) % p == (a * b ** (p - 2)) % p)
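# The two checks rely on Fermat's little theorem: for prime p and b not divisible by p,
# b**(p-2) % p is the modular inverse of b. Python's built-in three-argument pow
# computes the same quantity iteratively and makes a handy cross-check:
assert binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p)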
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
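# Construction sketch (the defaults mirror the fallbacks above):
#
#   config = ConvNextV2Config()                         # hidden_sizes=[96, 192, 384, 768], depths=[3, 3, 9, 3]
#   config = ConvNextV2Config(out_features=["stage4"])  # expose only the last stage to a backbone consumer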
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)])

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset("xnli", model_args.language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None)
        else:
            train_dataset = load_dataset("xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None)
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset("xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None)
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset("xnli", model_args.language, split="test", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None)
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task="xnli", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None)
    tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None)
    model = AutoModelForSequenceClassification.from_pretrained(model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes)

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(examples["premise"], examples["hypothesis"], padding=padding, max_length=data_args.max_seq_length, truncation=True)

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset")
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset")

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset")

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator)

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
        max_predict_samples = data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
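# A hypothetical single-GPU launch (all paths and hyper-parameters are illustrative):
#
#   python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en \
#       --do_train --do_eval \
#       --max_seq_length 128 \
#       --output_dir /tmp/debug_xnli --overwrite_output_dir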
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23])
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31])
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
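# A hypothetical invocation (the script filename is illustrative; the checkpoint is
# pulled from the hub repo configured above, and the logit checks require a CUDA device):
#
#   python convert_sam_to_hf.py --model_name sam_vit_h_4b8939 --pytorch_dump_folder_path ./sam-vit-huge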
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0xE000, eos_token_id=0xE001, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16384, local_transformer_stride=128, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
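# CANINE operates directly on Unicode code points, so the special ids are drawn from
# the private-use area where no real character can collide with them:
#
#   config = CanineConfig()
#   (config.bos_token_id, config.eos_token_id)  # (0xE000, 0xE001) == (57344, 57345)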
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, vocab_size=50244, hidden_size=768, d_kv=64, d_ff=2048, num_layers=12, num_heads=12, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, dense_act_fn="gelu_new", decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1, tie_word_embeddings=False, is_decoder=True, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2048, d_kv=64, num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1e-6, dropout_rate=0.0, attention_dropout=0.0, initializer_range=1e-10, initializer_factor=1.0, seq_len=4096, relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02, is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
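# Composition sketch: the composite config is built from (or decomposed into) its two
# sub-configs, and the top-level initializer_range is pushed down into both:
#
#   config = Pix2StructConfig.from_text_vision_configs(Pix2StructTextConfig(), Pix2StructVisionConfig())
#   assert config.text_config.initializer_range == config.initializer_range
#   assert config.to_dict()["model_type"] == "pix2struct"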
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, quantity: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(quantity))


def random_number(chars_incl, quantity):
    pass  # Put your code here...


def random_letters(chars_incl, quantity):
    pass  # Put your code here...


def random_characters(chars_incl, quantity):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters
    return upper and lower and num and spec_char


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print("Alternative Password generated:", alternative_password_generator(chars_incl, length))
    print("[If you are thinking of using this password, you had better save it.]")
if __name__ == "__main__":
main()
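    # Pure sanity checks for the strength predicate (no I/O involved):
    assert is_strong_password("Aa1!aaaa") is True   # upper, lower, digit, special, length 8
    assert is_strong_password("password") is False  # lowercase only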
def is_unique(input_str: str) -> bool:
    """Return True if every character in `input_str` occurs at most once."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
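    # Example behaviour: one bit per code point, so any repeat trips the bitmap check.
    assert is_unique("abcdef") is True
    assert is_unique("abcABC") is True   # case-sensitive: distinct code points
    assert is_unique("abcabc") is False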
"""simple docstring"""
def solution(n: int = 10) -> str:
    """Return the last `n` digits of 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28_433 * (pow(2, 7_830_457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'{solution(10) = }')
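    # Project Euler 97: the last ten digits of the non-Mersenne prime
    # 28433 * 2**7830457 + 1. Three-argument pow keeps the exponentiation modular,
    # so the roughly 2.36-million-digit number is never materialised.
    assert solution(1) == "7"  # 2**7830457 ends in 2, and 28433 * 2 + 1 ends in 7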
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
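# Usage sketch: design a 1 kHz low-pass for 48 kHz audio and run samples through it
# one at a time (IIRFilter.process in audio_filters/iir_filter.py filters per sample):
#
#   filt = make_lowpass(1000, 48000)
#   filtered = [filt.process(sample) for sample in samples]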
def solution(min_total: int = 10**12) -> int:
    """
    Returns the number of blue discs in the first arrangement whose total number
    of discs exceeds `min_total`.
    """
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2
if __name__ == "__main__":
print(F"""{solution() = }""")
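# Project Euler 100: the first arrangement of more than 10**12 discs where drawing two
# blue discs has probability exactly 1/2, i.e. b/n * (b-1)/(n-1) = 1/2. The recurrence
# above walks the integer solutions; small cases it passes through are 15 blue of
# 21 total (15/21 * 14/20 = 1/2) and 85 blue of 120 total.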
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(prompt, generator=generator, num_inference_steps=5, negative_prompt="").to_tuple()

        output = pipeline(image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class OnnxRuntimeRequiredTests:  # placeholder name; the original class name is not recoverable from this excerpt
    pass
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
UpperCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase__ ( UpperCamelCase__ : Tuple ) -> List[List[ImageInput]]:
'''simple docstring'''
if isinstance(UpperCamelCase__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(UpperCamelCase__ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(UpperCamelCase__ ):
return [[videos]]
raise ValueError(F'''Could not make batched video from {videos}''' )
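# Quick sketch of the normalization above (the function is invoked as
# `make_batched` later in this file): a bare image becomes [[image]], a flat
# list of frames becomes [frames], and an already-batched list of videos is
# returned unchanged, so downstream code can always iterate
# `for video in videos: for frame in video: ...`.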
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = ['''pixel_values''']
def __init__( self , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = PILImageResampling.BILINEAR , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = True , lowerCAmelCase_ = 1 / 255 , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> None:
super().__init__(**lowerCAmelCase_ )
_snake_case = size if size is not None else {'shortest_edge': 256}
_snake_case = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
_snake_case = crop_size if crop_size is not None else {'height': 224, 'width': 224}
_snake_case = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
_snake_case = do_resize
_snake_case = size
_snake_case = do_center_crop
_snake_case = crop_size
_snake_case = resample
_snake_case = do_rescale
_snake_case = rescale_factor
_snake_case = offset
_snake_case = do_normalize
_snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = PILImageResampling.BILINEAR , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> np.ndarray:
_snake_case = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
if "shortest_edge" in size:
_snake_case = get_resize_output_image_size(lowerCAmelCase_ , size['shortest_edge'] , default_to_square=lowerCAmelCase_ )
elif "height" in size and "width" in size:
_snake_case = (size['height'], size['width'])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> np.ndarray:
_snake_case = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(lowerCAmelCase_ , size=(size['height'], size['width']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = True , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> int:
_snake_case = image.astype(np.float32 )
if offset:
_snake_case = image - (scale / 2)
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> np.ndarray:
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = ChannelDimension.FIRST , ) -> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.' )
# All transformations expect numpy arrays.
_snake_case = to_numpy_array(lowerCAmelCase_ )
if do_resize:
_snake_case = self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ )
if do_center_crop:
_snake_case = self.center_crop(lowerCAmelCase_ , size=lowerCAmelCase_ )
if do_rescale:
_snake_case = self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ , offset=lowerCAmelCase_ )
if do_normalize:
_snake_case = self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ )
_snake_case = to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ )
return image
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = ChannelDimension.FIRST , **lowerCAmelCase_ , ) -> PIL.Image.Image:
_snake_case = do_resize if do_resize is not None else self.do_resize
_snake_case = resample if resample is not None else self.resample
_snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
_snake_case = do_rescale if do_rescale is not None else self.do_rescale
_snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
_snake_case = offset if offset is not None else self.offset
_snake_case = do_normalize if do_normalize is not None else self.do_normalize
_snake_case = image_mean if image_mean is not None else self.image_mean
_snake_case = image_std if image_std is not None else self.image_std
_snake_case = size if size is not None else self.size
_snake_case = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
_snake_case = crop_size if crop_size is not None else self.crop_size
_snake_case = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
_snake_case = make_batched(lowerCAmelCase_ )
_snake_case = [
[
self._preprocess_image(
image=lowerCAmelCase_ , do_resize=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , do_center_crop=lowerCAmelCase_ , crop_size=lowerCAmelCase_ , do_rescale=lowerCAmelCase_ , rescale_factor=lowerCAmelCase_ , offset=lowerCAmelCase_ , do_normalize=lowerCAmelCase_ , image_mean=lowerCAmelCase_ , image_std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , )
for img in video
]
for video in videos
]
_snake_case = {'pixel_values': videos}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
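# Sketch of the resulting feature layout (an assumption based on the nested
# comprehension above, not verified against upstream): without a tensor_type,
# "pixel_values" is a list of b videos, each a list of t frames of shape
# (3, crop_height, crop_width) given ChannelDimension.FIRST; with
# tensor_type="pt" it stacks into a (b, t, 3, crop_height, crop_width) tensor.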
| 541
| 1
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def lowerCamelCase__ ( ):
"""simple docstring"""
lowerCAmelCase_ = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
lowerCAmelCase_ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ).convert("RGB" )
return image
def lowerCamelCase__ ( __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
lowerCAmelCase_ = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") )
# fmt: on
return rename_keys
def lowerCamelCase__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase_ = dct.pop(__lowerCAmelCase )
lowerCAmelCase_ = val
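# Usage example, grounded in the rename list built above:
#   rename_key(state_dict, "ln_vision.weight", "vision_model.post_layernorm.weight")
# pops the tensor stored under the old key and re-inserts it under the new name.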
def lowerCamelCase__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
lowerCAmelCase_ = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" )
lowerCAmelCase_ = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
lowerCAmelCase_ = torch.cat((q_bias, torch.zeros_like(__lowerCAmelCase , requires_grad=__lowerCAmelCase ), v_bias) )
lowerCAmelCase_ = qkv_bias
def lowerCamelCase__ ( __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
lowerCAmelCase_ = 364 if "coco" in model_name else 224
lowerCAmelCase_ = InstructBlipVisionConfig(image_size=__lowerCAmelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
lowerCAmelCase_ = T5Config.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
lowerCAmelCase_ = T5Config.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
lowerCAmelCase_ = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=32001 ).to_dict()
elif "vicuna-13b" in model_name:
lowerCAmelCase_ = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=32001 ).to_dict()
else:
raise ValueError("Model name not supported" )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
lowerCAmelCase_ = InstructBlipQFormerConfig(vocab_size=30523 ).to_dict()
lowerCAmelCase_ = InstructBlipConfig(vision_config=__lowerCAmelCase , text_config=__lowerCAmelCase , qformer_config=__lowerCAmelCase )
return config, image_size
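# For example, "instructblip-vicuna-7b" yields a LLaMA text backbone with
# vocab_size 32001 (one extra slot for the "[PAD]" token added to the tokenizer
# below) and an image size of 224, while checkpoints with "coco" in the name use 364.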
@torch.no_grad()
def lowerCamelCase__ ( __lowerCAmelCase : str , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=False ):
"""simple docstring"""
lowerCAmelCase_ = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" )
qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} )
if "t5" in model_name:
lowerCAmelCase_ = T5TokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
lowerCAmelCase_ = LlamaTokenizerFast.from_pretrained(
"huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" )
tokenizer.add_special_tokens({"pad_token": "[PAD]"} )
lowerCAmelCase_ , lowerCAmelCase_ = get_blip2_config(__lowerCAmelCase )
lowerCAmelCase_ = InstructBlipForConditionalGeneration(__lowerCAmelCase ).eval()
lowerCAmelCase_ = {
"instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
"instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
"instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
"instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
}
lowerCAmelCase_ , lowerCAmelCase_ = model_name_to_original[model_name]
# load original model
print("Loading original model..." )
lowerCAmelCase_ = "cuda:1" if torch.cuda.is_available() else "cpu"
lowerCAmelCase_ = "cuda:2" if torch.cuda.is_available() else "cpu"
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = load_model_and_preprocess(
name=__lowerCAmelCase , model_type=__lowerCAmelCase , is_eval=__lowerCAmelCase , device=__lowerCAmelCase )
original_model.eval()
print("Done!" )
# update state dict keys
lowerCAmelCase_ = original_model.state_dict()
lowerCAmelCase_ = create_rename_keys(__lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowerCAmelCase_ = state_dict.pop(__lowerCAmelCase )
if key.startswith("Qformer.bert" ):
lowerCAmelCase_ = key.replace("Qformer.bert" , "qformer" )
if "attention.self" in key:
lowerCAmelCase_ = key.replace("self" , "attention" )
if "llm_proj" in key:
lowerCAmelCase_ = key.replace("llm_proj" , "language_projection" )
if "t5_proj" in key:
lowerCAmelCase_ = key.replace("t5_proj" , "language_projection" )
if key.startswith("llm_model" ):
lowerCAmelCase_ = key.replace("llm_model" , "language_model" )
if key.startswith("t5" ):
lowerCAmelCase_ = key.replace("t5" , "language" )
lowerCAmelCase_ = val
# read in qv biases
read_in_q_v_bias(__lowerCAmelCase , __lowerCAmelCase )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
lowerCAmelCase_ = load_demo_image()
lowerCAmelCase_ = "What is unusual about this image?"
# create processor
lowerCAmelCase_ = BlipImageProcessor(
size={"height": image_size, "width": image_size} , image_mean=__lowerCAmelCase , image_std=__lowerCAmelCase )
lowerCAmelCase_ = InstructBlipProcessor(
image_processor=__lowerCAmelCase , tokenizer=__lowerCAmelCase , qformer_tokenizer=__lowerCAmelCase , )
lowerCAmelCase_ = processor(images=__lowerCAmelCase , text=__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
# make sure processor creates exact same pixel values
lowerCAmelCase_ = vis_processors["eval"](__lowerCAmelCase ).unsqueeze(0 ).to(__lowerCAmelCase )
lowerCAmelCase_ = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __lowerCAmelCase )
original_model.to(__lowerCAmelCase )
hf_model.to(__lowerCAmelCase )
with torch.no_grad():
if "vicuna" in model_name:
lowerCAmelCase_ = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits
lowerCAmelCase_ = hf_model(**__lowerCAmelCase ).logits
else:
lowerCAmelCase_ = original_model(
{"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits
lowerCAmelCase_ = tokenizer("\n" , return_tensors="pt" ).input_ids.to(__lowerCAmelCase )
lowerCAmelCase_ = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
lowerCAmelCase_ = hf_model(**__lowerCAmelCase , labels=__lowerCAmelCase ).logits
print("First values of original logits:" , original_logits[0, :3, :3] )
print("First values of HF logits:" , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
lowerCAmelCase_ = 1e-4 if "vicuna" in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ) , __lowerCAmelCase , atol=__lowerCAmelCase )
print("Looks ok!" )
print("Generating with original model..." )
lowerCAmelCase_ = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("Generating with HF model..." )
lowerCAmelCase_ = hf_model.generate(
**__lowerCAmelCase , do_sample=__lowerCAmelCase , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
lowerCAmelCase_ = 2
print("Original generation:" , __lowerCAmelCase )
lowerCAmelCase_ = processor.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
lowerCAmelCase_ = [text.strip() for text in output_text]
print("HF generation:" , __lowerCAmelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__lowerCAmelCase )
hf_model.save_pretrained(__lowerCAmelCase )
if push_to_hub:
processor.push_to_hub(F"""Salesforce/{model_name}""" )
hf_model.push_to_hub(F"""Salesforce/{model_name}""" )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
_A = [
"instructblip-vicuna-7b",
"instructblip-vicuna-13b",
"instructblip-flan-t5-xl",
"instructblip-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="instructblip-flan-t5-xl",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
_A = parser.parse_args()
convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 290
|
from collections.abc import Callable
def lowerCamelCase__ ( __lowerCAmelCase : Callable[[float], float] , __lowerCAmelCase : float , __lowerCAmelCase : float ):
"""simple docstring"""
lowerCAmelCase_ = a
lowerCAmelCase_ = b
if function(__lowerCAmelCase ) == 0: # one of the a or b is a root for the function
return a
elif function(__lowerCAmelCase ) == 0:
return b
elif (
function(__lowerCAmelCase ) * function(__lowerCAmelCase ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError("could not find root in given interval." )
else:
lowerCAmelCase_ = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(__lowerCAmelCase ) == 0:
return mid
elif function(__lowerCAmelCase ) * function(__lowerCAmelCase ) < 0:
lowerCAmelCase_ = mid
else:
lowerCAmelCase_ = mid
lowerCAmelCase_ = start + (end - start) / 2.0
return mid
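# Hand-checked example: f(x) = x**3 - 2*x - 5 (defined below) has a single real
# root at x ≈ 2.0945515, so bisection(f, 1, 1000) converges to that value
# within the 1e-7 tolerance used above.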
def lowerCamelCase__ ( __lowerCAmelCase : float ):
"""simple docstring"""
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 10_00))
import doctest
doctest.testmod()
| 290
| 1
|
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
UpperCAmelCase_ : Dict = sys.version_info >= (3, 10)
def A_ ( _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Tuple=None ):
"""simple docstring"""
return field(default_factory=lambda: default , metadata=_lowerCAmelCase )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = False
lowerCAmelCase_ = True
lowerCAmelCase_ = None
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'titi'
lowerCAmelCase_ = 'toto'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'titi'
lowerCAmelCase_ = 'toto'
lowerCAmelCase_ = 42
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = "toto"
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : List[Any] = BasicEnum(self.foo )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = "toto"
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Tuple = MixedTypeEnum(self.foo )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = None
lowerCAmelCase_ = field(default=A , metadata={'help': 'help message'} )
lowerCAmelCase_ = None
lowerCAmelCase_ = list_field(default=[] )
lowerCAmelCase_ = list_field(default=[] )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = list_field(default=[] )
lowerCAmelCase_ = list_field(default=[1, 2, 3] )
lowerCAmelCase_ = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
lowerCAmelCase_ = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field()
lowerCAmelCase_ = field()
lowerCAmelCase_ = field()
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Union[str, Any] = BasicEnum(self.required_enum )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = field()
lowerCAmelCase_ = None
lowerCAmelCase_ = field(default='toto' , metadata={'help': 'help message'} )
lowerCAmelCase_ = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = False
lowerCAmelCase_ = True
lowerCAmelCase_ = None
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = None
lowerCAmelCase_ = field(default=A , metadata={'help': 'help message'} )
lowerCAmelCase_ = None
lowerCAmelCase_ = list_field(default=[] )
lowerCAmelCase_ = list_field(default=[] )
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : int,__A : argparse.ArgumentParser,__A : argparse.ArgumentParser ):
self.assertEqual(len(a._actions ),len(b._actions ) )
for x, y in zip(a._actions,b._actions ):
_lowerCamelCase : List[str] = {k: v for k, v in vars(__A ).items() if k != "container"}
_lowerCamelCase : List[Any] = {k: v for k, v in vars(__A ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices",__A ) and yy.get("choices",__A ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](__A ),yy["type"](__A ) )
del xx["type"], yy["type"]
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[int] = HfArgumentParser(__A )
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument("--foo",type=__A,required=__A )
expected.add_argument("--bar",type=__A,required=__A )
expected.add_argument("--baz",type=__A,required=__A )
expected.add_argument("--flag",type=__A,default=__A,const=__A,nargs="?" )
self.argparsersEqual(__A,__A )
_lowerCamelCase : Any = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
((_lowerCamelCase) , ) : Tuple = parser.parse_args_into_dataclasses(__A,look_for_args_file=__A )
self.assertFalse(example.flag )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[int] = HfArgumentParser(__A )
_lowerCamelCase : Optional[int] = argparse.ArgumentParser()
expected.add_argument("--foo",default=4_2,type=__A )
expected.add_argument("--baz",default="toto",type=__A,help="help message" )
self.argparsersEqual(__A,__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
expected.add_argument("--foo",type=__A,default=__A,const=__A,nargs="?" )
expected.add_argument("--baz",type=__A,default=__A,const=__A,nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz",action="store_false",default=__A,dest="baz" )
expected.add_argument("--opt",type=__A,default=__A )
_lowerCamelCase : Tuple = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__A )
for dataclass_type in dataclass_types:
_lowerCamelCase : str = HfArgumentParser(__A )
self.argparsersEqual(__A,__A )
_lowerCamelCase : Optional[Any] = parser.parse_args([] )
self.assertEqual(__A,Namespace(foo=__A,baz=__A,opt=__A ) )
_lowerCamelCase : Dict = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(__A,Namespace(foo=__A,baz=__A,opt=__A ) )
_lowerCamelCase : Optional[Any] = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(__A,Namespace(foo=__A,baz=__A,opt=__A ) )
_lowerCamelCase : Optional[int] = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(__A,Namespace(foo=__A,baz=__A,opt=__A ) )
_lowerCamelCase : List[str] = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(__A,Namespace(foo=__A,baz=__A,opt=__A ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[str] = HfArgumentParser(__A )
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument(
"--foo",default="toto",choices=["titi", "toto", 4_2],type=make_choice_type_function(["titi", "toto", 4_2] ),)
self.argparsersEqual(__A,__A )
_lowerCamelCase : Optional[Any] = parser.parse_args([] )
self.assertEqual(args.foo,"toto" )
_lowerCamelCase : str = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo,MixedTypeEnum.toto )
_lowerCamelCase : int = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo,"titi" )
_lowerCamelCase : Optional[Any] = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo,MixedTypeEnum.titi )
_lowerCamelCase : Optional[int] = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo,4_2 )
_lowerCamelCase : Any = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo,MixedTypeEnum.fourtytwo )
def lowerCamelCase_ ( self : Union[str, Any] ):
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = "toto"
_lowerCamelCase : int = HfArgumentParser(__A )
_lowerCamelCase : str = argparse.ArgumentParser()
expected.add_argument(
"--foo",default="toto",choices=("titi", "toto", 4_2),type=make_choice_type_function(["titi", "toto", 4_2] ),)
self.argparsersEqual(__A,__A )
_lowerCamelCase : Any = parser.parse_args([] )
self.assertEqual(args.foo,"toto" )
_lowerCamelCase : Optional[Any] = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo,"titi" )
_lowerCamelCase : int = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo,4_2 )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = HfArgumentParser(__A )
_lowerCamelCase : Any = argparse.ArgumentParser()
expected.add_argument("--foo_int",nargs="+",default=[],type=__A )
expected.add_argument("--bar_int",nargs="+",default=[1, 2, 3],type=__A )
expected.add_argument("--foo_str",nargs="+",default=["Hallo", "Bonjour", "Hello"],type=__A )
expected.add_argument("--foo_float",nargs="+",default=[0.1, 0.2, 0.3],type=__A )
self.argparsersEqual(__A,__A )
_lowerCamelCase : Union[str, Any] = parser.parse_args([] )
self.assertEqual(
__A,Namespace(foo_int=[],bar_int=[1, 2, 3],foo_str=["Hallo", "Bonjour", "Hello"],foo_float=[0.1, 0.2, 0.3] ),)
_lowerCamelCase : Any = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(__A,Namespace(foo_int=[1],bar_int=[2, 3],foo_str=["a", "b", "c"],foo_float=[0.1, 0.7] ) )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
expected.add_argument("--foo",default=__A,type=__A )
expected.add_argument("--bar",default=__A,type=__A,help="help message" )
expected.add_argument("--baz",default=__A,type=__A )
expected.add_argument("--ces",nargs="+",default=[],type=__A )
expected.add_argument("--des",nargs="+",default=[],type=__A )
_lowerCamelCase : Optional[int] = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__A )
for dataclass_type in dataclass_types:
_lowerCamelCase : Any = HfArgumentParser(__A )
self.argparsersEqual(__A,__A )
_lowerCamelCase : Union[str, Any] = parser.parse_args([] )
self.assertEqual(__A,Namespace(foo=__A,bar=__A,baz=__A,ces=[],des=[] ) )
_lowerCamelCase : Any = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(__A,Namespace(foo=1_2,bar=3.14,baz="42",ces=["a", "b", "c"],des=[1, 2, 3] ) )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Optional[Any] = HfArgumentParser(__A )
_lowerCamelCase : str = argparse.ArgumentParser()
expected.add_argument("--required_list",nargs="+",type=__A,required=__A )
expected.add_argument("--required_str",type=__A,required=__A )
expected.add_argument(
"--required_enum",type=make_choice_type_function(["titi", "toto"] ),choices=["titi", "toto"],required=__A,)
self.argparsersEqual(__A,__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = HfArgumentParser(__A )
_lowerCamelCase : int = argparse.ArgumentParser()
expected.add_argument("--foo",type=__A,required=__A )
expected.add_argument(
"--required_enum",type=make_choice_type_function(["titi", "toto"] ),choices=["titi", "toto"],required=__A,)
expected.add_argument("--opt",type=__A,default=__A )
expected.add_argument("--baz",default="toto",type=__A,help="help message" )
expected.add_argument("--foo_str",nargs="+",default=["Hallo", "Bonjour", "Hello"],type=__A )
self.argparsersEqual(__A,__A )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Tuple = HfArgumentParser(__A )
_lowerCamelCase : Optional[Any] = {
"foo": 1_2,
"bar": 3.14,
"baz": "42",
"flag": True,
}
_lowerCamelCase : Tuple = parser.parse_dict(__A )[0]
_lowerCamelCase : str = BasicExample(**__A )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Tuple = HfArgumentParser(__A )
_lowerCamelCase : str = {
"foo": 1_2,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 4_2,
}
self.assertRaises(__A,parser.parse_dict,__A,allow_extra_keys=__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[int] = HfArgumentParser(__A )
_lowerCamelCase : List[str] = {
"foo": 1_2,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : Dict = os.path.join(__A,"temp_json" )
os.mkdir(__A )
with open(temp_local_path + ".json","w+" ) as f:
json.dump(__A,__A )
_lowerCamelCase : Tuple = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
_lowerCamelCase : List[Any] = BasicExample(**__A )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Union[str, Any] = HfArgumentParser(__A )
_lowerCamelCase : List[str] = {
"foo": 1_2,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : Tuple = os.path.join(__A,"temp_yaml" )
os.mkdir(__A )
with open(temp_local_path + ".yaml","w+" ) as f:
yaml.dump(__A,__A )
_lowerCamelCase : List[str] = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
_lowerCamelCase : Optional[int] = BasicExample(**__A )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : List[str] = HfArgumentParser(__A )
self.assertIsNotNone(__A )
| 11
|
'''simple docstring'''
def A_ ( _lowerCAmelCase : float ):
"""simple docstring"""
return 10 - x * x
def A_ ( _lowerCAmelCase : float , _lowerCAmelCase : float ):
"""simple docstring"""
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) >= 0:
raise ValueError("Wrong space!" )
_lowerCamelCase : List[str] = a
while (b - a) >= 0.0_1:
# Find middle point
_lowerCamelCase : Union[str, Any] = (a + b) / 2
# Check if middle point is root
if equation(_lowerCAmelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) < 0:
_lowerCamelCase : Union[str, Any] = c
else:
_lowerCamelCase : Any = c
return c
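# Hand-checked example: equation(x) = 10 - x*x has roots at ±sqrt(10), so both
# calls below, bisection(-2, 5) and bisection(0, 6), converge to ≈ 3.16 within
# the 0.01 interval tolerance.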
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 11
| 1
|
"""simple docstring"""
from math import pi
def _lowerCamelCase ( __a, __a ):
return 2 * pi * radius * (angle / 360)
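# Worked example: a 90-degree arc of a circle of radius 10 has length
# 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.708, which is what the call below prints.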
if __name__ == "__main__":
print(arc_length(90, 10))
| 626
|
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
lowerCAmelCase__ = get_logger(__name__)
def _lowerCamelCase ( __a, __a, __a, __a, __a=0 ):
os.makedirs(__a, exist_ok=__a )
with FSDP.state_dict_type(
__a, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
SCREAMING_SNAKE_CASE_ = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
SCREAMING_SNAKE_CASE_ = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin'
SCREAMING_SNAKE_CASE_ = os.path.join(__a, __a )
if accelerator.process_index == 0:
logger.info(F'Saving model to {output_model_file}' )
torch.save(__a, __a )
logger.info(F'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
SCREAMING_SNAKE_CASE_ = (
F'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
SCREAMING_SNAKE_CASE_ = os.path.join(__a, __a )
logger.info(F'Saving model to {output_model_file}' )
torch.save(__a, __a )
logger.info(F'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
SCREAMING_SNAKE_CASE_ = os.path.join(__a, F'{MODEL_NAME}_{model_index}' )
os.makedirs(__a, exist_ok=__a )
logger.info(F'Saving model to {ckpt_dir}' )
SCREAMING_SNAKE_CASE_ = {'''model''': state_dict}
dist_cp.save_state_dict(
state_dict=__a, storage_writer=dist_cp.FileSystemWriter(__a ), planner=DefaultSavePlanner(), )
logger.info(F'Model saved to {ckpt_dir}' )
def _lowerCamelCase ( __a, __a, __a, __a, __a=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__a, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(__a ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
'''initializing FSDP object''' )
return
SCREAMING_SNAKE_CASE_ = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin'
SCREAMING_SNAKE_CASE_ = os.path.join(__a, __a )
logger.info(F'Loading model from {input_model_file}' )
SCREAMING_SNAKE_CASE_ = torch.load(__a )
logger.info(F'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
SCREAMING_SNAKE_CASE_ = (
F'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
SCREAMING_SNAKE_CASE_ = os.path.join(__a, __a )
logger.info(F'Loading model from {input_model_file}' )
SCREAMING_SNAKE_CASE_ = torch.load(__a )
logger.info(F'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
SCREAMING_SNAKE_CASE_ = (
os.path.join(__a, F'{MODEL_NAME}_{model_index}' )
if F'{MODEL_NAME}' not in input_dir
else input_dir
)
logger.info(F'Loading model from {ckpt_dir}' )
SCREAMING_SNAKE_CASE_ = {'''model''': model.state_dict()}
dist_cp.load_state_dict(
state_dict=__a, storage_reader=dist_cp.FileSystemReader(__a ), planner=DefaultLoadPlanner(), )
SCREAMING_SNAKE_CASE_ = state_dict['''model''']
logger.info(F'Model loaded from {ckpt_dir}' )
model.load_state_dict(__a )
def _lowerCamelCase ( __a, __a, __a, __a, __a, __a=0 ):
os.makedirs(__a, exist_ok=__a )
with FSDP.state_dict_type(
__a, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
SCREAMING_SNAKE_CASE_ = FSDP.optim_state_dict(__a, __a )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
SCREAMING_SNAKE_CASE_ = (
F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
SCREAMING_SNAKE_CASE_ = os.path.join(__a, __a )
logger.info(F'Saving Optimizer state to {output_optimizer_file}' )
torch.save(__a, __a )
logger.info(F'Optimizer state saved in {output_optimizer_file}' )
else:
SCREAMING_SNAKE_CASE_ = os.path.join(__a, F'{OPTIMIZER_NAME}_{optimizer_index}' )
os.makedirs(__a, exist_ok=__a )
logger.info(F'Saving Optimizer state to {ckpt_dir}' )
dist_cp.save_state_dict(
state_dict={'''optimizer''': optim_state}, storage_writer=dist_cp.FileSystemWriter(__a ), planner=DefaultSavePlanner(), )
logger.info(F'Optimizer state saved in {ckpt_dir}' )
def _lowerCamelCase ( __a, __a, __a, __a, __a, __a=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__a, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
SCREAMING_SNAKE_CASE_ = None
# below check should work but currently it isn't working (mostly a PyTorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
SCREAMING_SNAKE_CASE_ = (
F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
SCREAMING_SNAKE_CASE_ = os.path.join(__a, __a )
logger.info(F'Loading Optimizer state from {input_optimizer_file}' )
SCREAMING_SNAKE_CASE_ = torch.load(__a )
logger.info(F'Optimizer state loaded from {input_optimizer_file}' )
else:
SCREAMING_SNAKE_CASE_ = (
os.path.join(__a, F'{OPTIMIZER_NAME}_{optimizer_index}' )
if F'{OPTIMIZER_NAME}' not in input_dir
else input_dir
)
logger.info(F'Loading Optimizer from {ckpt_dir}' )
SCREAMING_SNAKE_CASE_ = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict(), optimizer_key='''optimizer''', storage_reader=dist_cp.FileSystemReader(__a ), )
SCREAMING_SNAKE_CASE_ = optim_state['''optimizer''']
logger.info(F'Optimizer loaded from {ckpt_dir}' )
SCREAMING_SNAKE_CASE_ = FSDP.optim_state_dict_to_load(__a, __a, __a )
optimizer.load_state_dict(__a )
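# Note: in upstream accelerate these four helpers are named (to the best of our
# knowledge; the names here are mangled) save_fsdp_model, load_fsdp_model,
# save_fsdp_optimizer and load_fsdp_optimizer. Each dispatches on
# fsdp_plugin.state_dict_type: FULL_STATE_DICT writes one consolidated .bin from
# rank 0, LOCAL_STATE_DICT one .bin per rank, and SHARDED_STATE_DICT a dist_cp
# checkpoint directory.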
| 626
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
UpperCamelCase__ : Optional[Any] = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class _UpperCamelCase ( A_ ):
'''simple docstring'''
lowerCamelCase : str = 'data2vec-text'
def __init__( self : Tuple , __lowercase : str=3_05_22 , __lowercase : Optional[Any]=7_68 , __lowercase : Any=12 , __lowercase : List[Any]=12 , __lowercase : Optional[Any]=30_72 , __lowercase : List[Any]="gelu" , __lowercase : List[str]=0.1 , __lowercase : Optional[Any]=0.1 , __lowercase : Optional[Any]=5_12 , __lowercase : Optional[Any]=2 , __lowercase : Any=0.02 , __lowercase : Union[str, Any]=1e-12 , __lowercase : int=1 , __lowercase : Optional[Any]=0 , __lowercase : Any=2 , __lowercase : List[str]="absolute" , __lowercase : List[str]=True , __lowercase : Tuple=None , **__lowercase : Optional[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = classifier_dropout
class _UpperCamelCase ( A_ ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
UpperCAmelCase_ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 486
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ : List[Any] = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[str] = [
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 486
| 1
|
from sklearn.metrics import matthews_corrcoef
import datasets
_lowercase = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
_lowercase = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
_lowercase = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) ,reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
] ,)
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : Optional[Any] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Optional[Any]=None ) -> Tuple:
'''simple docstring'''
return {
"matthews_correlation": float(matthews_corrcoef(lowerCAmelCase__ ,lowerCAmelCase__ ,sample_weight=lowerCAmelCase__ ) ),
}
| 659
|
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def lowerCamelCase_ ( ):
lowercase : Any = torch.nn.Linear(2 , 4 )
lowercase : int = torch.optim.AdamW(model.parameters() , lr=1.0 )
lowercase : Optional[Any] = torch.optim.lr_scheduler.OneCycleLR(UpperCAmelCase_ , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
lowercase : List[Any] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
lowercase : Optional[int] = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def lowerCamelCase_ ( UpperCAmelCase_ : Any ):
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
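# This scalar acts as a cheap weight fingerprint: the save/load tests below
# assert that it changes after load_random_weights and is restored to within
# 1e-3 after accelerator.load_state.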
def lowerCamelCase_ ( UpperCAmelCase_ : Dict ):
lowercase : List[str] = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(UpperCAmelCase_ )
class UpperCAmelCase ( __lowerCamelCase ):
@require_cuda
def _lowerCAmelCase ( self : Optional[Any] ):
lowercase : List[Any] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(lowerCAmelCase ):
lowercase : Optional[Any] = Accelerator(cpu=lowerCAmelCase )
def _lowerCAmelCase ( self : Union[str, Any] ):
lowercase : int = Accelerator()
lowercase : Union[str, Any] = GradientState()
assert state.num_steps == 1
lowercase : Tuple = 4
assert state.num_steps == 4
assert state.sync_gradients is True
lowercase : Tuple = False
assert state.sync_gradients is False
GradientState._reset_state()
def _lowerCAmelCase ( self : Union[str, Any] ):
lowercase : List[str] = Accelerator()
lowercase , lowercase , lowercase , lowercase , lowercase : Optional[int] = create_components()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : Union[str, Any] = accelerator.prepare(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def _lowerCAmelCase ( self : int ):
lowercase : Optional[Any] = Accelerator()
lowercase , lowercase , lowercase , lowercase , lowercase : Optional[Any] = create_components()
accelerator.prepare(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def _lowerCAmelCase ( self : Dict ):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*lowerCAmelCase : Any , **lowerCAmelCase : Optional[int] ):
pass
with patch('''torch.cuda.set_device''' , lowerCAmelCase ), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ):
lowercase : Optional[Any] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' )
def _lowerCAmelCase ( self : Union[str, Any] ):
lowercase : Tuple = Accelerator()
lowercase , lowercase , lowercase , lowercase , lowercase : int = create_components()
accelerator.prepare(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
lowercase : List[Any] = get_signature(lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowerCAmelCase )
# make sure random weights don't match
load_random_weights(lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase ) ) < 1E-3 )
def _lowerCAmelCase ( self : Optional[Any] ):
lowercase : int = Accelerator()
lowercase , lowercase , lowercase , lowercase , lowercase : List[str] = create_components()
accelerator.prepare(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
lowercase : Tuple = get_signature(lowerCAmelCase )
# saving hook
def save_config(lowerCAmelCase : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : int ):
lowercase : Optional[int] = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(lowerCAmelCase , '''data.json''' ) , '''w''' ) as f:
json.dump(lowerCAmelCase , lowerCAmelCase )
# loading hook
def load_config(lowerCAmelCase : Dict , lowerCAmelCase : Dict ):
with open(os.path.join(lowerCAmelCase , '''data.json''' ) , '''r''' ) as f:
lowercase : Dict = json.load(lowerCAmelCase )
lowercase : Union[str, Any] = config['''class_name''']
lowercase : str = accelerator.register_save_state_pre_hook(lowerCAmelCase )
lowercase : Tuple = accelerator.register_load_state_pre_hook(lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowerCAmelCase )
# make sure random weights don't match with hooks
load_random_weights(lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase ) ) > 1E-3 )
# random class name to verify correct one is loaded
lowercase : List[Any] = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase ) ) < 1E-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowerCAmelCase )
# make sure random weights don't match with hooks removed
load_random_weights(lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase ) ) > 1E-3 )
# random class name to verify correct one is loaded
lowercase : Any = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase ) ) < 1E-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def _lowerCAmelCase ( self : Union[str, Any] ):
lowercase : List[Any] = Accelerator()
lowercase , lowercase , lowercase , lowercase , lowercase : Optional[int] = create_components()
lowercase : int = None
# This should work
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase : int = accelerator.prepare(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
self.assertTrue(dummy_obj is None )
def _lowerCAmelCase ( self : Dict ):
lowercase : str = Accelerator()
lowercase , lowercase , lowercase , lowercase , lowercase : List[str] = create_components()
lowercase : List[str] = [1, 2, 3]
# This should work
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase : Union[str, Any] = accelerator.prepare(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
self.assertEqual(
getattr(lowerCAmelCase , '''_is_accelerate_prepared''' , lowerCAmelCase ) , lowerCAmelCase , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
self.assertEqual(
getattr(lowerCAmelCase , '''_is_accelerate_prepared''' , lowerCAmelCase ) , lowerCAmelCase , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(lowerCAmelCase , '''_is_accelerate_prepared''' , lowerCAmelCase ) , lowerCAmelCase , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(lowerCAmelCase , '''_is_accelerate_prepared''' , lowerCAmelCase ) , lowerCAmelCase , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(lowerCAmelCase , '''_is_accelerate_prepared''' , lowerCAmelCase ) , lowerCAmelCase , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(lowerCAmelCase , '''_is_accelerate_prepared''' , lowerCAmelCase ) , lowerCAmelCase , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
@slow
@require_bnb
def _lowerCAmelCase ( self : Optional[int] ):
from transformers import AutoModelForCausalLM
lowercase : Optional[int] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_8bit=lowerCAmelCase , device_map={'''''': 0} , )
lowercase : List[str] = Accelerator()
# This should work
lowercase : Any = accelerator.prepare(lowerCAmelCase )
@slow
@require_bnb
def _lowerCAmelCase ( self : Optional[Any] ):
from transformers import AutoModelForCausalLM
lowercase : Optional[int] = Accelerator()
with init_empty_weights():
lowercase : str = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
lowercase : Optional[Any] = infer_auto_device_map(lowerCAmelCase )
lowercase : str = '''cpu'''
lowercase : Optional[Any] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , device_map=lowerCAmelCase , load_in_8bit=lowerCAmelCase , llm_int8_enable_fp32_cpu_offload=lowerCAmelCase )
        # This should not work and should raise a ValueError
with self.assertRaises(lowerCAmelCase ):
lowercase : Optional[Any] = accelerator.prepare(lowerCAmelCase )
@slow
@require_bnb
@require_multi_gpu
def _lowerCAmelCase ( self : Union[str, Any] ):
from transformers import AutoModelForCausalLM
lowercase : Tuple = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
lowercase : Union[str, Any] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
lowercase : int = infer_auto_device_map(lowerCAmelCase )
lowercase : Tuple = 1
lowercase : Tuple = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''' , load_in_8bit=lowerCAmelCase , device_map=lowerCAmelCase , )
lowercase : Any = Accelerator()
        # This should not work and should raise a ValueError
with self.assertRaises(lowerCAmelCase ):
lowercase : str = accelerator.prepare(lowerCAmelCase )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def _lowerCAmelCase ( self : str ):
from transformers import AutoModelForCausalLM
with init_empty_weights():
lowercase : Optional[int] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
lowercase : Any = infer_auto_device_map(lowerCAmelCase )
lowercase : Any = 1
lowercase : int = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''' , load_in_8bit=lowerCAmelCase , device_map=lowerCAmelCase , )
lowercase : List[str] = Accelerator()
# This should work
lowercase : List[Any] = accelerator.prepare(lowerCAmelCase )
@require_cuda
def _lowerCAmelCase ( self : str ):
lowercase : int = torch.nn.Linear(10 , 10 )
lowercase : Optional[Any] = torch.optim.SGD(model.parameters() , lr=0.01 )
lowercase : Union[str, Any] = Accelerator(cpu=lowerCAmelCase )
lowercase : Union[str, Any] = accelerator.prepare(lowerCAmelCase )
| 583
| 0
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__SCREAMING_SNAKE_CASE : Tuple =logging.getLogger(__name__)
def UpperCamelCase__ ( lowerCAmelCase__=2 ,lowerCAmelCase__=3 ,lowerCAmelCase__=16 ,lowerCAmelCase__ = 10 ,lowerCAmelCase__ = 2 ):
def get_dataset(lowerCAmelCase__ ):
lowercase = torch.randn(batch_size * n_batches ,1 )
return TensorDataset(lowerCAmelCase__ ,a * x + b + 0.1 * torch.randn(batch_size * n_batches ,1 ) )
lowercase = get_dataset(lowerCAmelCase__ )
lowercase = get_dataset(lowerCAmelCase__ )
lowercase = DataLoader(lowerCAmelCase__ ,shuffle=lowerCAmelCase__ ,batch_size=lowerCAmelCase__ ,num_workers=4 )
lowercase = DataLoader(lowerCAmelCase__ ,shuffle=lowerCAmelCase__ ,batch_size=lowerCAmelCase__ ,num_workers=4 )
return (train_dataloader, valid_dataloader)
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ):
lowercase = []
for epoch in range(lowerCAmelCase__ ):
# Train quickly
model.train()
for batch in dataloader:
lowercase , lowercase = batch
lowercase = model(lowerCAmelCase__ )
lowercase = torch.nn.functional.mse_loss(lowerCAmelCase__ ,lowerCAmelCase__ )
accelerator.backward(lowerCAmelCase__ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class A_ ( nn.Module ):
def __init__( self : Any ):
super().__init__()
lowercase = nn.Parameter(torch.randn(1 ) )
lowercase = nn.Parameter(torch.randn(1 ) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : Any ):
return x * self.a + self.b
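# A quick hand-checked note on the scalar model above: it computes the affine
# map y = a * x + b, the same relationship (up to noise) that the synthetic
# dataloaders generate; e.g. with a=2 and b=1, an input of 3 yields 7.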
class A_ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : int ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase = DummyModel()
lowercase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase , lowercase = dummy_dataloaders()
lowercase = ProjectConfiguration(total_limit=1 , project_dir=snake_case__ , automatic_checkpoint_naming=snake_case__ )
# Train baseline
lowercase = Accelerator(project_config=snake_case__ )
lowercase , lowercase , lowercase , lowercase = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase = DummyModel()
lowercase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase , lowercase = dummy_dataloaders()
# Train baseline
lowercase = Accelerator()
lowercase , lowercase , lowercase , lowercase = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Save initial
lowercase = os.path.join(snake_case__ , """initial""" )
accelerator.save_state(snake_case__ )
((lowercase) , (lowercase)) = model.a.item(), model.b.item()
lowercase = optimizer.state_dict()
lowercase = train(3 , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
((lowercase) , (lowercase)) = model.a.item(), model.b.item()
lowercase = optimizer.state_dict()
# Train partially
set_seed(42 )
lowercase = DummyModel()
lowercase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase , lowercase = dummy_dataloaders()
lowercase = Accelerator()
lowercase , lowercase , lowercase , lowercase = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
accelerator.load_state(snake_case__ )
((lowercase) , (lowercase)) = model.a.item(), model.b.item()
lowercase = optimizer.state_dict()
self.assertEqual(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
lowercase = train(2 , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Save everything
lowercase = os.path.join(snake_case__ , """checkpoint""" )
accelerator.save_state(snake_case__ )
# Load everything back in and make sure all states work
accelerator.load_state(snake_case__ )
test_rands += train(1 , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
((lowercase) , (lowercase)) = model.a.item(), model.b.item()
lowercase = optimizer.state_dict()
self.assertEqual(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase = DummyModel()
lowercase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase , lowercase = dummy_dataloaders()
lowercase = ProjectConfiguration(automatic_checkpoint_naming=snake_case__ )
# Train baseline
lowercase = Accelerator(project_dir=snake_case__ , project_config=snake_case__ )
lowercase , lowercase , lowercase , lowercase = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Save initial
accelerator.save_state()
((lowercase) , (lowercase)) = model.a.item(), model.b.item()
lowercase = optimizer.state_dict()
lowercase = train(3 , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
((lowercase) , (lowercase)) = model.a.item(), model.b.item()
lowercase = optimizer.state_dict()
# Train partially
set_seed(42 )
lowercase = DummyModel()
lowercase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase , lowercase = dummy_dataloaders()
lowercase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=snake_case__ )
lowercase = Accelerator(project_dir=snake_case__ , project_config=snake_case__ )
lowercase , lowercase , lowercase , lowercase = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
accelerator.load_state(os.path.join(snake_case__ , """checkpoints""" , """checkpoint_0""" ) )
((lowercase) , (lowercase)) = model.a.item(), model.b.item()
lowercase = optimizer.state_dict()
self.assertEqual(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
lowercase = train(2 , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(snake_case__ , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
((lowercase) , (lowercase)) = model.a.item(), model.b.item()
lowercase = optimizer.state_dict()
self.assertEqual(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowercase = torch.tensor([1, 2, 3] )
lowercase = torch.tensor([2, 3, 4] )
lowercase = DummyModel()
lowercase = torch.optim.Adam(net.parameters() )
lowercase = Accelerator()
with self.assertRaises(snake_case__ ) as ve:
accelerator.register_for_checkpointing(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowercase = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase = DummyModel()
lowercase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase = torch.optim.lr_scheduler.StepLR(snake_case__ , step_size=1 , gamma=0.99 )
lowercase , lowercase = dummy_dataloaders()
lowercase = ProjectConfiguration(automatic_checkpoint_naming=snake_case__ )
# Train baseline
lowercase = Accelerator(project_dir=snake_case__ , project_config=snake_case__ )
lowercase , lowercase , lowercase , lowercase , lowercase = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Save initial
accelerator.save_state()
lowercase = scheduler.state_dict()
train(3 , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
self.assertNotEqual(snake_case__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(snake_case__ , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(snake_case__ , scheduler.state_dict() )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase = DummyModel()
lowercase = ProjectConfiguration(automatic_checkpoint_naming=snake_case__ , total_limit=2 )
# Train baseline
lowercase = Accelerator(project_dir=snake_case__ , project_config=snake_case__ )
lowercase = accelerator.prepare(snake_case__ )
            # Save 11 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(snake_case__ , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def SCREAMING_SNAKE_CASE__ ( self : int ):
lowercase = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(snake_case__ , env=os.environ.copy() )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Any ='''/tmp/accelerate/state_checkpointing'''
__SCREAMING_SNAKE_CASE : Optional[Any] =DummyModel()
__SCREAMING_SNAKE_CASE : int =torch.optim.Adam(params=model.parameters(), lr=1E-3)
__SCREAMING_SNAKE_CASE : List[Any] =torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] =dummy_dataloaders()
__SCREAMING_SNAKE_CASE : Dict =ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__SCREAMING_SNAKE_CASE : Union[str, Any] =Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='''no''')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] =accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str =accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__SCREAMING_SNAKE_CASE : Dict =group['''params'''][0].device
break
assert param_device.type == accelerator.device.type
__SCREAMING_SNAKE_CASE : Optional[int] =model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''')
for group in optimizer.param_groups:
__SCREAMING_SNAKE_CASE : str =group['''params'''][0].device
break
assert (
param_device.type == torch.device('''cpu''').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''')
for group in optimizer.param_groups:
__SCREAMING_SNAKE_CASE : Optional[int] =group['''params'''][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''):
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 72
|
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__SCREAMING_SNAKE_CASE : Any =logging.get_logger(__name__)
# General docstring
__SCREAMING_SNAKE_CASE : Union[str, Any] ='''PoolFormerConfig'''
# Base docstring
__SCREAMING_SNAKE_CASE : List[Any] ='''sail/poolformer_s12'''
__SCREAMING_SNAKE_CASE : Union[str, Any] =[1, 512, 7, 7]
# Image classification docstring
__SCREAMING_SNAKE_CASE : Any ='''sail/poolformer_s12'''
__SCREAMING_SNAKE_CASE : Union[str, Any] ='''tabby, tabby cat'''
__SCREAMING_SNAKE_CASE : Tuple =[
'''sail/poolformer_s12''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ = 0.0 ,lowerCAmelCase__ = False ):
if drop_prob == 0.0 or not training:
return input
lowercase = 1 - drop_prob
lowercase = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
lowercase = keep_prob + torch.rand(lowerCAmelCase__ ,dtype=input.dtype ,device=input.device )
random_tensor.floor_() # binarize
lowercase = input.div(lowerCAmelCase__ ) * random_tensor
return output
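# A minimal sketch (shapes assumed) of the stochastic-depth function above,
# which the module below refers to as `drop_path`: with a drop probability of
# 0.5 roughly half of the per-sample masks are zero and the survivors are
# scaled by 1 / keep_prob, so the expected value of the output matches the input:
#   x = torch.ones(4, 3, 8, 8)
#   out = drop_path(x, 0.5, True)  # drop_prob=0.5, training=True
#   assert out.shape == x.shape  # the (batch, 1, 1, 1) mask broadcasts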
class A_ ( nn.Module ):
def __init__( self : Union[str, Any] , snake_case__ : Optional[float] = None ):
super().__init__()
lowercase = drop_prob
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : torch.Tensor ):
return drop_path(snake_case__ , self.drop_prob , self.training )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
return "p={}".format(self.drop_prob )
class A_ ( nn.Module ):
def __init__( self : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Tuple , snake_case__ : str , snake_case__ : List[str]=None ):
super().__init__()
lowercase = patch_size if isinstance(snake_case__ , collections.abc.Iterable ) else (patch_size, patch_size)
lowercase = stride if isinstance(snake_case__ , collections.abc.Iterable ) else (stride, stride)
lowercase = padding if isinstance(snake_case__ , collections.abc.Iterable ) else (padding, padding)
        lowercase = nn.Conv2d(snake_case__ , snake_case__ , kernel_size=snake_case__ , stride=snake_case__ , padding=snake_case__ )
lowercase = norm_layer(snake_case__ ) if norm_layer else nn.Identity()
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : List[Any] ):
lowercase = self.projection(snake_case__ )
lowercase = self.norm(snake_case__ )
return embeddings
class A_ ( nn.GroupNorm ):
def __init__( self : Union[str, Any] , snake_case__ : Dict , **snake_case__ : List[str] ):
super().__init__(1 , snake_case__ , **snake_case__ )
class A_ ( nn.Module ):
def __init__( self : int , snake_case__ : Any ):
super().__init__()
        lowercase = nn.AvgPool2d(snake_case__ , stride=1 , padding=pool_size // 2 , count_include_pad=snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : Union[str, Any] ):
return self.pool(snake_case__ ) - hidden_states
class A_ ( nn.Module ):
def __init__( self : int , snake_case__ : Any , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Dict ):
super().__init__()
        lowercase = nn.Conv2d(snake_case__ , snake_case__ , 1 )
        lowercase = nn.Conv2d(snake_case__ , snake_case__ , 1 )
lowercase = PoolFormerDropPath(snake_case__ )
if isinstance(config.hidden_act , snake_case__ ):
            lowercase = ACT2FN[config.hidden_act]
else:
lowercase = config.hidden_act
def SCREAMING_SNAKE_CASE__ ( self : int , snake_case__ : Dict ):
lowercase = self.conva(snake_case__ )
lowercase = self.act_fn(snake_case__ )
lowercase = self.drop(snake_case__ )
lowercase = self.conva(snake_case__ )
lowercase = self.drop(snake_case__ )
return hidden_states
class A_ ( nn.Module ):
def __init__( self : int , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[str] ):
super().__init__()
lowercase = PoolFormerPooling(snake_case__ )
lowercase = PoolFormerOutput(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowercase = PoolFormerGroupNorm(snake_case__ )
lowercase = PoolFormerGroupNorm(snake_case__ )
# Useful for training neural nets
lowercase = PoolFormerDropPath(snake_case__ ) if drop_path > 0.0 else nn.Identity()
lowercase = config.use_layer_scale
if config.use_layer_scale:
lowercase = nn.Parameter(
config.layer_scale_init_value * torch.ones((snake_case__) ) , requires_grad=snake_case__ )
lowercase = nn.Parameter(
config.layer_scale_init_value * torch.ones((snake_case__) ) , requires_grad=snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : List[str] ):
if self.use_layer_scale:
lowercase = self.pooling(self.before_norm(snake_case__ ) )
lowercase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
lowercase = hidden_states + self.drop_path(snake_case__ )
lowercase = ()
lowercase = self.output(self.after_norm(snake_case__ ) )
lowercase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
lowercase = hidden_states + self.drop_path(snake_case__ )
lowercase = (output,) + outputs
return outputs
else:
lowercase = self.drop_path(self.pooling(self.before_norm(snake_case__ ) ) )
# First residual connection
lowercase = pooling_output + hidden_states
lowercase = ()
# Second residual connection inside the PoolFormerOutput block
lowercase = self.drop_path(self.output(self.after_norm(snake_case__ ) ) )
lowercase = hidden_states + layer_output
lowercase = (output,) + outputs
return outputs
class A_ ( nn.Module ):
def __init__( self : List[str] , snake_case__ : Optional[Any] ):
super().__init__()
lowercase = config
# stochastic depth decay rule
lowercase = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
lowercase = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
lowercase = nn.ModuleList(snake_case__ )
# Transformer blocks
lowercase = []
lowercase = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
lowercase = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
snake_case__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(snake_case__ ) )
lowercase = nn.ModuleList(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[Any]=False , snake_case__ : Optional[int]=True ):
lowercase = () if output_hidden_states else None
lowercase = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
lowercase , lowercase = layers
# Get patch embeddings from hidden_states
lowercase = embedding_layer(snake_case__ )
# Send the embeddings through the blocks
for _, blk in enumerate(snake_case__ ):
lowercase = blk(snake_case__ )
lowercase = layer_outputs[0]
if output_hidden_states:
lowercase = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=snake_case__ , hidden_states=snake_case__ )
class A_ ( __a ):
_A :Any = PoolFormerConfig
_A :int = '''poolformer'''
_A :Union[str, Any] = '''pixel_values'''
_A :str = True
def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Union[str, Any] ):
        if isinstance(snake_case__ , (nn.Linear, nn.Conv2d) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(snake_case__ , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : Any , snake_case__ : Optional[int]=False ):
if isinstance(snake_case__ , snake_case__ ):
lowercase = value
__SCREAMING_SNAKE_CASE : Optional[Any] =R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__SCREAMING_SNAKE_CASE : str =R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
'''
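# A minimal usage sketch for the model documented above (the checkpoint name
# comes from the docstring constants; the exact preprocessing call is an
# assumption):
#   from transformers import AutoImageProcessor, PoolFormerModel
#   processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerModel.from_pretrained("sail/poolformer_s12")
#   inputs = processor(images=image, return_tensors="pt")  # `image` is a PIL image
#   outputs = model(**inputs)  # last_hidden_state of shape [1, 512, 7, 7]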
@add_start_docstrings(
'''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , __a , )
class A_ ( __a ):
def __init__( self : Union[str, Any] , snake_case__ : int ):
super().__init__(snake_case__ )
lowercase = config
lowercase = PoolFormerEncoder(snake_case__ )
# Initialize weights and apply final processing
self.post_init()
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(snake_case__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , ):
lowercase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
lowercase = self.encoder(
snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , )
lowercase = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=snake_case__ , hidden_states=encoder_outputs.hidden_states , )
class A_ ( nn.Module ):
def __init__( self : List[str] , snake_case__ : Optional[int] ):
super().__init__()
lowercase = nn.Linear(config.hidden_size , config.hidden_size )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : str ):
lowercase = self.dense(snake_case__ )
return output
@add_start_docstrings(
'''
PoolFormer Model transformer with an image classification head on top
''' , __a , )
class A_ ( __a ):
def __init__( self : Dict , snake_case__ : Any ):
super().__init__(snake_case__ )
lowercase = config.num_labels
lowercase = PoolFormerModel(snake_case__ )
# Final norm
lowercase = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
lowercase = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(snake_case__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[torch.LongTensor] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , ):
lowercase = return_dict if return_dict is not None else self.config.use_return_dict
lowercase = self.poolformer(
snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , )
lowercase = outputs[0]
lowercase = self.classifier(self.norm(snake_case__ ).mean([-2, -1] ) )
lowercase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase = """single_label_classification"""
else:
lowercase = """multi_label_classification"""
if self.config.problem_type == "regression":
lowercase = MSELoss()
if self.num_labels == 1:
lowercase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase = loss_fct(snake_case__ , snake_case__ )
elif self.config.problem_type == "single_label_classification":
lowercase = CrossEntropyLoss()
lowercase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase = BCEWithLogitsLoss()
lowercase = loss_fct(snake_case__ , snake_case__ )
if not return_dict:
lowercase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states )
| 72
| 1
|
__snake_case = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(100_000)]
def _lowercase ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
UpperCamelCase = 0
while number:
        # Speed is increased slightly by checking five digits at a time.
sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
number //= 100_000
return sum_of_digits_squared
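# A hand-checked example of the squared-digit step above: for 44 the sum is
# 4**2 + 4**2 = 32, and the chain 44 -> 32 -> 13 -> 10 -> 1 terminates at 1,
# while 85 -> 89 -> 145 -> ... eventually arrives back at 89.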
# Two chains are formed:
# one ends with 89; declaring the chain member 58 first gives the fewest
# iterations needed to check all of the remaining members.
# The other ends with 1 and has only the single element 1.
# So 58 and 1 are chosen to be declared at the start.
# The dictionary was changed to an array to speed up the solution.
__snake_case = [None] * 10_000_000
__snake_case = True
__snake_case = False
def _lowercase ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
UpperCamelCase = chain(next_number(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase = number_chain
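    # Multiplying by 10 only appends a trailing zero, which does not change the
    # squared-digit sum, so the loop below caches the same chain result for
    # number * 10, number * 100, ... up to the 10_000_000 bound.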
while number < 10_000_000:
UpperCamelCase = number_chain
number *= 10
return number_chain
def _lowercase ( SCREAMING_SNAKE_CASE_ : int = 10_000_000 ):
"""simple docstring"""
for i in range(1 , SCREAMING_SNAKE_CASE_ ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
| 386
|
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( __snake_case , unittest.TestCase ):
    lowercase = GPTSw3Tokenizer
lowercase = False
lowercase = True
lowercase = False
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        UpperCamelCase = GPTSw3Tokenizer(__magic_name__ , eos_token="""<unk>""" , bos_token="""<unk>""" , pad_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : List[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = """This is a test"""
UpperCamelCase = """This is a test"""
return input_text, output_text
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = """<s>"""
UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(__magic_name__ ) , 2_0_0_0 )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 2_0_0_0 )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
        UpperCamelCase = GPTSw3Tokenizer(__magic_name__ )
UpperCamelCase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2] )
UpperCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
# fmt: off
self.assertListEqual(
__magic_name__ , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , )
# fmt: on
UpperCamelCase = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(
__magic_name__ , [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0] , )
UpperCamelCase = tokenizer.convert_ids_to_tokens(__magic_name__ )
# fmt: off
self.assertListEqual(
__magic_name__ , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
# fmt: on
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
        UpperCamelCase = GPTSw3Tokenizer(__magic_name__ )
UpperCamelCase = ["""This is a test""", """I was born in 92000, and this is falsé."""]
UpperCamelCase = [
[4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2],
[2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(__magic_name__ , __magic_name__ ):
self.assertListEqual(tokenizer.encode_fast(__magic_name__ ) , __magic_name__ )
# Test that decode_fast returns the input text
for text, token_ids in zip(__magic_name__ , __magic_name__ ):
self.assertEqual(tokenizer.decode_fast(__magic_name__ ) , __magic_name__ )
@slow
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
UpperCamelCase = {"""input_ids""": [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=__magic_name__ , )
| 386
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase__ ( __lowerCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = 4_2
UpperCAmelCase__ : List[str] = 4_2
def __init__( self , A_ , A_ ) -> Tuple:
super().__init__()
self.register_modules(unet=_UpperCamelCase , scheduler=_UpperCamelCase )
@torch.no_grad()
def __call__( self , A_ = 1 , A_ = 2000 , A_ = None , A_ = "pil" , A_ = True , **A_ , ) -> Union[ImagePipelineOutput, Tuple]:
__UpperCamelCase =self.unet.config.sample_size
__UpperCamelCase =(batch_size, 3, img_size, img_size)
__UpperCamelCase =self.unet
__UpperCamelCase =randn_tensor(_UpperCamelCase , generator=_UpperCamelCase ) * self.scheduler.init_noise_sigma
__UpperCamelCase =sample.to(self.device )
self.scheduler.set_timesteps(_UpperCamelCase )
self.scheduler.set_sigmas(_UpperCamelCase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__UpperCamelCase =self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
__UpperCamelCase =self.unet(_UpperCamelCase , _UpperCamelCase ).sample
__UpperCamelCase =self.scheduler.step_correct(_UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase ).prev_sample
# prediction step
__UpperCamelCase =model(_UpperCamelCase , _UpperCamelCase ).sample
__UpperCamelCase =self.scheduler.step_pred(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase )
__UpperCamelCase =output.prev_sample, output.prev_sample_mean
__UpperCamelCase =sample_mean.clamp(0 , 1 )
__UpperCamelCase =sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCamelCase =self.numpy_to_pil(_UpperCamelCase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=_UpperCamelCase )
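# A minimal usage sketch (the class and checkpoint names are assumptions based
# on the diffusers score-SDE pipeline this snippet mirrors):
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(num_inference_steps=2000).images[0]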
| 715
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , ) -> List[str]:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =2
__UpperCamelCase =99
__UpperCamelCase =0
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase ='last'
__UpperCamelCase =True
__UpperCamelCase =None
__UpperCamelCase =0
def _a ( self ) -> List[Any]:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.float32 )
__UpperCamelCase =None
if self.use_input_lengths:
__UpperCamelCase =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        __UpperCamelCase =ids_tensor([self.batch_size] , 2 , dtype=tf.float32 )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Any:
__UpperCamelCase =TFFlaubertModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertWithLMHeadModel(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertForQuestionAnsweringSimple(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =TFFlaubertForSequenceClassification(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFFlaubertForTokenClassification(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFFlaubertForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) =config_and_inputs
__UpperCamelCase ={
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCAmelCase__ : Any = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[int] = False
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _a ( self ) -> Dict:
__UpperCamelCase =TFFlaubertModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , emb_dim=37 )
def _a ( self ) -> Dict:
self.config_tester.run_common_tests()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A_ )
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*A_ )
@slow
def _a ( self ) -> Optional[int]:
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =TFFlaubertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> int:
__UpperCamelCase =TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
__UpperCamelCase =tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.int32 , ) # "J'aime flaubert !"
__UpperCamelCase =model(A_ )[0]
__UpperCamelCase =tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice.
__UpperCamelCase =tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
            ] , dtype=tf.float32 , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 682
| 0
|
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 327
|
import unittest
from transformers import DonutProcessor
lowercase : Optional[int] = "naver-clova-ix/donut-base"
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
        __UpperCamelCase : Tuple = DonutProcessor.from_pretrained(lowercase )
def __lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : Tuple = {
"name": "John Doe",
"age": "99",
"city": "Atlanta",
"state": "GA",
"zip": "30301",
"phone": "123-4567",
"nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
}
__UpperCamelCase : int = (
"<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
"<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
"<s_nicknames><s_nickname>Johnny</s_nickname>"
"<sep/><s_nickname>JD</s_nickname></s_nicknames>"
)
        __UpperCamelCase : List[str] = self.processor.token2json(__UpperCamelCase )
self.assertDictEqual(__UpperCamelCase , __UpperCamelCase )
| 327
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
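    # A short illustration (deobfuscated names assumed) of what the lazy module
    # buys you: `from transformers.models.nllb import NllbTokenizer` defers the
    # sentencepiece-backed import until the attribute is first accessed.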
| 599
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class __UpperCAmelCase ( UpperCamelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = "facebook/bart-large-mnli"
SCREAMING_SNAKE_CASE : int = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
SCREAMING_SNAKE_CASE : Union[str, Any] = "text_classifier"
SCREAMING_SNAKE_CASE : Tuple = AutoTokenizer
SCREAMING_SNAKE_CASE : Any = AutoModelForSequenceClassification
SCREAMING_SNAKE_CASE : Union[str, Any] = ["text", ["text"]]
SCREAMING_SNAKE_CASE : Dict = ["text"]
def lowerCamelCase ( self ):
super().setup()
UpperCAmelCase__ : Tuple = self.model.config
UpperCAmelCase__ : Dict = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('''entail''' ):
UpperCAmelCase__ : Optional[Any] = int(_UpperCAmelCase )
if self.entailment_id == -1:
raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
def lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase__ : Optional[int] = labels
return self.pre_processor(
[text] * len(_UpperCAmelCase ) , [F"""This example is {label}""" for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )
def lowerCamelCase ( self , _UpperCAmelCase ):
UpperCAmelCase__ : Tuple = outputs.logits
UpperCAmelCase__ : Optional[int] = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
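# A minimal usage sketch (the concrete tool class name in transformers is an
# assumption; the call follows the encode/forward/decode trio above):
#   classifier = TextClassificationTool()
#   classifier("This is a super nice API!", labels=["positive", "negative"])
#   # -> the label with the highest entailment logit, e.g. "positive"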
| 599
| 1
|
"""simple docstring"""
class _lowerCAmelCase :
def __init__( self , UpperCamelCase__ = "" , UpperCamelCase__ = False ) -> Any:
'''simple docstring'''
snake_case : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
snake_case : Tuple = is_leaf
snake_case : Tuple = prefix
def lowerCamelCase ( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
snake_case : str = 0
for q, w in zip(self.prefix , _A ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def lowerCamelCase ( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
for word in words:
self.insert(_A )
def lowerCamelCase ( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
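        # Case 1: The word is exactly this node's prefix
        # Solution: We mark the current node as a leaf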
if self.prefix == word:
snake_case : Optional[int] = True
        # Case 2: The node has no edge that shares a first character with the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
snake_case : Tuple = RadixNode(prefix=_A , is_leaf=_A )
else:
snake_case : Optional[int] = self.nodes[word[0]]
snake_case : Optional[Any] = incoming_node.match(
_A )
            # Case 3: The node prefix is equal to the matching string
            # Solution: We insert the remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(_A )
            # Case 4: The word is longer than the matching string
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
else:
snake_case : List[str] = remaining_prefix
snake_case : str = self.nodes[matching_string[0]]
snake_case : List[Any] = RadixNode(_A , _A )
snake_case : Tuple = aux_node
if remaining_word == "":
snake_case : int = True
else:
self.nodes[matching_string[0]].insert(_A )
def lowerCamelCase ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
snake_case : Tuple = self.nodes.get(word[0] , _A )
if not incoming_node:
return False
else:
snake_case : str = incoming_node.match(
_A )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(_A )
def lowerCamelCase ( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
snake_case : Tuple = self.nodes.get(word[0] , _A )
if not incoming_node:
return False
else:
snake_case : Optional[int] = incoming_node.match(
_A )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(_A )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
snake_case : Dict = list(self.nodes.values() )[0]
snake_case : int = merging_node.is_leaf
self.prefix += merging_node.prefix
snake_case : str = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
snake_case : Union[str, Any] = False
# If there is 1 edge, we merge it with its child
else:
snake_case : int = list(incoming_node.nodes.values() )[0]
snake_case : Optional[int] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
snake_case : Optional[Any] = merging_node.nodes
return True
def lowerCamelCase ( self , UpperCamelCase__ = 0 ) -> str:
'''simple docstring'''
if self.prefix != "":
print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def __lowerCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case : Any = "banana bananas bandana band apple all beast".split()
snake_case : int = RadixNode()
root.insert_many(lowercase )
assert all(root.find(lowercase ) for word in words )
assert not root.find("bandanas" )
assert not root.find("apps" )
root.delete("all" )
assert not root.find("all" )
root.delete("banana" )
assert not root.find("banana" )
assert root.find("bananas" )
return True
def __lowerCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
assert test_trie()
def __lowerCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
snake_case : List[str] = RadixNode()
snake_case : Any = "banana bananas bandanas bandana band apple all beast".split()
root.insert_many(lowercase )
print("Words:" , lowercase )
print("Tree:" )
root.print_tree()
if __name__ == "__main__":
main()
| 178
|
def abbr(a: str, b: str) -> bool:
    """
    Determine whether string ``b`` can be obtained from ``a`` by capitalizing
    some of its lowercase letters and deleting the remaining lowercase letters.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    # dp[i][j]: can the first i chars of a produce the first j chars of b?
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
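
    # Worked example (added for illustration): abbr("daBcd", "ABC") is True
    # because "a" and "c" can be capitalized to match "A" and "C", "B" already
    # matches, and the remaining lowercase "d" characters are deleted.
    print(abbr("daBcd", "ABC"))  # True
    print(abbr("dBcd", "ABC"))  # False -- "d" cannot become "A"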
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
_a: int = """<<<<<<< This should probably be modified because it mentions: """
_a: Union[str, Any] = """=======
>>>>>>>
"""
_a: int = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
_a: int = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def __lowerCAmelCase ( A ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class __UpperCamelCase ( lowercase ):
@staticmethod
def __A ( lowerCAmelCase : ArgumentParser ):
'''simple docstring'''
UpperCAmelCase_ = parser.add_parser(
"convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , )
train_parser.add_argument(
"--tfds_path" , type=lowerCAmelCase , required=lowerCAmelCase , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , )
train_parser.add_argument(
"--datasets_directory" , type=lowerCAmelCase , required=lowerCAmelCase , help="Path to the HuggingFace Datasets folder." )
train_parser.set_defaults(func=lowerCAmelCase )
def __init__( self : Any , lowerCAmelCase : str , lowerCAmelCase : str , *lowerCAmelCase : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ = get_logger("datasets-cli/converting" )
UpperCAmelCase_ = tfds_path
UpperCAmelCase_ = datasets_directory
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
UpperCAmelCase_ = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
UpperCAmelCase_ = os.path.dirname(self._tfds_path )
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
UpperCAmelCase_ = os.path.abspath(self._datasets_directory )
self._logger.info(F"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = {}
if os.path.isdir(self._tfds_path ):
UpperCAmelCase_ = os.listdir(lowerCAmelCase )
else:
UpperCAmelCase_ = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F"Looking at file {f_name}" )
UpperCAmelCase_ = os.path.join(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = os.path.join(lowerCAmelCase , lowerCAmelCase )
if not os.path.isfile(lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file" )
continue
with open(lowerCAmelCase , encoding="utf-8" ) as f:
UpperCAmelCase_ = f.readlines()
UpperCAmelCase_ = []
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = []
for line in lines:
UpperCAmelCase_ = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
UpperCAmelCase_ = "import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
UpperCAmelCase_ = ""
continue
elif "from absl import logging" in out_line:
UpperCAmelCase_ = "from datasets import logging\n"
elif "getLogger" in out_line:
UpperCAmelCase_ = out_line.replace("getLogger" , "get_logger" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
UpperCAmelCase_ = True
UpperCAmelCase_ = list(filter(lambda lowerCAmelCase : e in out_line , lowerCAmelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase ) + "\n" )
out_lines.append(lowerCAmelCase )
out_lines.append(lowerCAmelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
UpperCAmelCase_ = re.sub(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
UpperCAmelCase_ = re.match(R"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , lowerCAmelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
UpperCAmelCase_ = "from . import " + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F"Error converting {out_line.strip()}" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
UpperCAmelCase_ = True
out_lines.append(lowerCAmelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
UpperCAmelCase_ = f_name.replace(".py" , "" )
UpperCAmelCase_ = os.path.join(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = os.path.join(lowerCAmelCase , lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
self._logger.info(F"Adding directory {output_dir}" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowerCAmelCase )
if needs_manual_update:
with_manual_update.append(lowerCAmelCase )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.writelines(lowerCAmelCase )
self._logger.info(F"Converted in {output_file}" )
for utils_file in utils_files:
try:
UpperCAmelCase_ = os.path.basename(lowerCAmelCase )
UpperCAmelCase_ = imports_to_builder_map[f_name.replace(".py" , "" )]
self._logger.info(F"Moving {dest_folder} to {utils_file}" )
shutil.copy(lowerCAmelCase , lowerCAmelCase )
except KeyError:
self._logger.error(F"Cannot find destination folder for {utils_file}. Please copy manually." )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}


class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file: str,
        spm_file: str,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the language-code token of the target language."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the decoder."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
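
# Minimal usage sketch (added for illustration; requires the `sentencepiece`
# package and network access to download the checkpoint):
#
#     from transformers import Speech2TextTokenizer
#     tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#     ids = tok("hello world").input_ids
#     print(tok.decode(ids, skip_special_tokens=True))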
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 2048-bit
1_4: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AACAA68FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 3072-bit
1_5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 4096-bit
1_6: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"""
+ """FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 6144-bit
1_7: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"""
+ """8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"""
+ """302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"""
+ """A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"""
+ """49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"""
+ """FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"""
+ """180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"""
+ """3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"""
+ """04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"""
+ """B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"""
+ """1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"""
+ """E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"""
+ """99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"""
+ """04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"""
+ """233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"""
+ """D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"""
+ """AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"""
+ """DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"""
+ """2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"""
+ """F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"""
+ """BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"""
+ """B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"""
+ """387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"""
+ """6DCC4024FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 8192-bit
1_8: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"""
+ """F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"""
+ """179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"""
+ """DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"""
+ """5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"""
+ """D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"""
+ """23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"""
+ """06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"""
+ """DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"""
+ """12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"""
+ """38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"""
+ """741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"""
+ """3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"""
+ """22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"""
+ """4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"""
+ """062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"""
+ """4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"""
+ """B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"""
+ """4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"""
+ """9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"""
+ """60C980DD98EDD3DFFFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
}
class DiffieHellman:
    """
    Class to represent the Diffie-Hellman key exchange protocol.
    """

    # Current minimum recommendation is 2048 bit (group 14)
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
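
    # Illustrative round trip (not in the original file): both parties derive
    # the same shared secret from each other's public keys.
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    assert alice.generate_shared_key(bob.generate_public_key()) == bob.generate_shared_key(
        alice.generate_public_key()
    )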
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    """
    Output of decoding method.
    """

    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[-1],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class VectorQuantizer(nn.Module):
    """
    Discretization bottleneck of a VQ-VAE. Maps each spatial position of the
    input to its nearest codebook embedding.
    """

    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
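
# Sketch of how the distribution above is typically consumed (illustrative;
# `moments` would normally come from the Encoder's conv_out, whose channel
# count is doubled when double_z=True precisely so it can be split in half
# into a mean and a log-variance):
#
#     moments = torch.randn(1, 8, 32, 32)  # (B, 2 * latent_channels, H, W)
#     posterior = DiagonalGaussianDistribution(moments)
#     z = posterior.sample()               # reparameterized draw: mean + std * eps
#     kl = posterior.kl()                  # KL against a standard normal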
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """
    Calculate the resonant frequency of an LC circuit for the given
    inductance (henries) and capacitance (farads).

    >>> resonant_frequency(inductance=10, capacitance=5)
    ('Resonant frequency', 0.022507907903927652)
    """
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
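
    # Worked example (illustrative): a 10 mH inductor with a 100 nF capacitor
    # resonates at f = 1 / (2 * pi * sqrt(L * C)) ≈ 5032.9 Hz.
    print(resonant_frequency(inductance=10e-3, capacitance=100e-9))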
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Inserts a list of words into the Trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Inserts a word into the Trie, one node per character."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Returns True if the word was inserted into the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Deletes a word from the Trie, pruning nodes that become unused."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
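
    # Illustrative contrast with the radix tree earlier in this dump (not in
    # the original file): a plain trie stores exactly one node per character,
    # and only explicitly inserted words are reported as present.
    node = TrieNode()
    node.insert("ab")
    assert node.find("ab") and not node.find("a")  # "a" was never inserted as a word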
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        filepath = os.path.join(self.tmpdirname, "file.npz")
        np.savez(filepath, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=filepath)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase = False ):
"""simple docstring"""
if radian_mode:
return [magnitude * cos(lowercase ), magnitude * sin(lowercase )]
return [magnitude * cos(radians(lowercase ) ), magnitude * sin(radians(lowercase ) )]
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase = 10**-1 ):
"""simple docstring"""
_UpperCAmelCase = cross(lowercase ,lowercase )
_UpperCAmelCase = sum(lowercase )
return abs(lowercase ) < eps
if __name__ == "__main__":
# Test to check if it works
UpperCAmelCase__ = array(
[
polar_force(718.4, 1_8_0 - 3_0),
polar_force(879.54, 4_5),
polar_force(1_0_0, -9_0),
]
)
UpperCAmelCase__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
UpperCAmelCase__ = array(
[
polar_force(3_0 * 9.81, 1_5),
polar_force(2_1_5, 1_8_0 - 4_5),
polar_force(2_6_4, 9_0 - 3_0),
]
)
UpperCAmelCase__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
UpperCAmelCase__ = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]])
UpperCAmelCase__ = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
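
    # Worked example (illustrative): a 10 N force at 45 degrees resolves into
    # roughly [7.07, 7.07] along x and y.
    print(polar_force(10, 45))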
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
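
# Minimal usage sketch (added for illustration): CANINE operates on Unicode
# code points rather than subword tokens, hence the large
# max_position_embeddings and the 0xE000/0xE001 sentinel token ids.
#
#     config = CanineConfig(num_hash_buckets=16384)
#     print(config.model_type, config.downsampling_rate)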
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum sum of non-adjacent integers in the input list.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
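
    # Worked example (illustrative): for [2, 7, 9, 3, 1] the best non-adjacent
    # picks are 2 + 9 + 1 = 12.
    print(maximum_non_adjacent_sum([2, 7, 9, 3, 1]))  # 12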
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}'
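
# For example (illustrative): repo_id="org-name/dataset-name",
# path="filename with blanks.csv", revision=None resolves to
# https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv
# since quote() percent-encodes the space in the file name.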
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """
    Base class from which `.generate()` streamers should inherit.
    """

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens"""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation"""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """
    Simple text streamer that prints the token(s) to stdout as soon as entire words are formed.
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """
        Receives tokens, decodes them, and prints them to stdout as soon as they form entire words.
        """
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache and signals the end of the stream."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    """
    Streamer that stores print-ready text in a queue, to be consumed by a downstream application as an iterator.
    """

    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
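
# Usage sketch (illustrative; `model` and `tok` are placeholders for a loaded
# causal LM and its tokenizer). The iterator form is meant to be driven from
# another thread so the main thread can consume text as it is produced:
#
#     from threading import Thread
#     streamer = TextIteratorStreamer(tok, skip_prompt=True)
#     inputs = tok(["A poem about"], return_tensors="pt")
#     Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer)).start()
#     for new_text in streamer:
#         print(new_text, end="")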
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
a = TypeVar('T')
class LRUCache(Generic[T]):
    """Least Recently Used (LRU) page-replacement cache."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Creates an empty store and reference set; the cache capacity is set to `n`."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Accesses a key, evicting the least recently used key if the store is full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Prints all the elements in the store, most recently used first."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
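# Note: "*" in the mapped keys above is a wildcard; `recursively_load_weights`
# below replaces it with the encoder layer index parsed from the fairseq
# weight name, so one entry covers every transformer layer.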
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the fairseq model's weights to the transformers design."""
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """
    Count the tile totals t <= t_limit for which a hollow square lamina can be
    formed in between 1 and n_limit distinct ways (Project Euler problem 174).
    """
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
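# Worked example: an outer square of width 7 with a centred hole of width 3
# uses 7 * 7 - 3 * 3 = 40 tiles, so it contributes one way of forming a
# lamina with 40 tiles to `count[40]`.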
if __name__ == "__main__":
print(f"{solution() = }")
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")
def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Minimum priority queue backed by a binary heap with a position map."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """Undirected, weighted graph stored as an adjacency map."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
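# A minimal demonstration (the graph below is illustrative, not from the
# original file): builds a 3-node weighted graph and prints the parent map of
# its minimum spanning tree.
if __name__ == "__main__":
    demo_graph = GraphUndirectedWeighted[int]()
    demo_graph.add_edge(1, 2, 3)
    demo_graph.add_edge(2, 3, 10)
    demo_graph.add_edge(1, 3, 11)
    demo_dist, demo_parent = prims_algo(demo_graph)
    print(demo_parent)  # expected: {1: None, 2: 1, 3: 1}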
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Computes the speed of sound in a fluid, c = sqrt(K / rho)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
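# Worked example (illustrative textbook values, not from the original file):
# for water, with bulk modulus K ≈ 2.15e9 Pa and density rho = 1000 kg/m^3,
# the speed of sound is sqrt(2.15e9 / 1000) ≈ 1466 m/s.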
if __name__ == "__main__":
import doctest
doctest.testmod()
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True
def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return it as the default level;
    otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, configuring the library root logger on first use."""
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current verbosity level of the library root logger as an int."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level of the library root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set the verbosity to the `INFO` level."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to the `WARNING` level."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to the `DEBUG` level."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to the `ERROR` level."""
    return set_verbosity(ERROR)
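# Typical library-user calls, shown as a sketch (the logger name below is
# illustrative):
#
#     from transformers.utils import logging
#
#     logging.set_verbosity_info()
#     logger = logging.get_logger("transformers")
#     logger.info("INFO")
#     logger.warning_advice("advisory WARNING, silenced by TRANSFORMERS_NO_ADVISORY_WARNINGS=1")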
def disable_default_handler() -> None:
    """Disable the default handler of the library root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Enable the default handler of the library root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    """Adds a handler to the library root logger."""
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """Removes the given handler from the library root logger."""
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
    """Disable propagation of the library log outputs (disabled by default)."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    """Enable explicit formatting for every library logger's handler."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    """Resets the formatting for the library loggers."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    """
    Identical to `logger.warning()`, but silenced if the env var
    TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set.
    """
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Identical to `logger.warning()`, but emits each distinct message only once."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any attribute access."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''},
'''tokenizer_file''': {
'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast MobileBERT tokenizer backed by the `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
def merge_sort(collection: list) -> list:
    """
    Sorts a list using the merge sort algorithm.

    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([])
    []
    """

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=',')
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """
    Edit distance via top-down dynamic programming (memoized recursion).

    >>> min_distance_up_bottom("intention", "execution")
    5
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index overflows - delete all remaining from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index overflows - delete all remaining from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
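# A minimal usage sketch (illustrative, not part of the original module):
#
#     config = CanineConfig(num_hidden_layers=6)
#     config.save_pretrained("./canine-small")  # writes config.json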
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'openbmb/cpm-ant-10b': 10_24,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered dictionary mapping token -> index."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # greedily match the longest substring present in the vocabulary
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
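# A minimal usage sketch (assumes the jieba backend is installed; the
# checkpoint name is the one referenced in PRETRAINED_VOCAB_FILES_MAP above):
#
#     tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#     ids = tokenizer.encode("今天天气真好!")
#     text = tokenizer.decode(ids)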
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTOTUNE = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args
def UpperCAmelCase__ ( lowercase__ ) -> List[str]:
try:
if args.tpu_name:
__lowercase = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
__lowercase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(lowercase__ )
tf.tpu.experimental.initialize_tpu_system(lowercase__ )
return tpu
def UpperCAmelCase__ ( lowercase__ ) -> int:
__lowercase = 0
for file in file_list:
__lowercase = file.split("""/""" )[-1]
__lowercase = re.search(r"""-\d+-(\d+)\.tfrecord""" , lowercase__ ).group(1 )
__lowercase = int(lowercase__ )
num_samples += sample_count
return num_samples
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=None ) -> Dict:
__lowercase = count_samples(lowercase__ )
__lowercase = tf.data.Dataset.from_tensor_slices(lowercase__ )
if shuffle:
__lowercase = dataset.shuffle(len(lowercase__ ) )
__lowercase = tf.data.TFRecordDataset(lowercase__ , num_parallel_reads=lowercase__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
__lowercase = dataset.apply(tf.data.experimental.assert_cardinality(lowercase__ ) )
__lowercase = dataset.map(lowercase__ , num_parallel_calls=lowercase__ )
if shuffle:
assert shuffle_buffer_size is not None
__lowercase = dataset.shuffle(args.shuffle_buffer_size )
__lowercase = dataset.batch(lowercase__ , drop_remainder=lowercase__ )
__lowercase = dataset.map(lowercase__ , num_parallel_calls=lowercase__ )
__lowercase = dataset.prefetch(lowercase__ )
return dataset
def UpperCAmelCase__ ( lowercase__ ) -> List[Any]:
if not args.no_tpu:
__lowercase = initialize_tpu(lowercase__ )
__lowercase = tf.distribute.TPUStrategy(lowercase__ )
else:
__lowercase = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
__lowercase = AutoTokenizer.from_pretrained(args.tokenizer )
__lowercase = AutoConfig.from_pretrained(args.pretrained_model_config )
__lowercase = tokenizer.vocab_size
__lowercase = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
if not training_records:
raise ValueError(F"No .tfrecord files found in {args.train_dataset}." )
__lowercase = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
if not eval_records:
raise ValueError(F"No .tfrecord files found in {args.eval_dataset}." )
__lowercase = count_samples(lowercase__ )
__lowercase = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
__lowercase = steps_per_epoch * args.num_epochs
with strategy.scope():
__lowercase = TFAutoModelForMaskedLM.from_config(lowercase__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
__lowercase , __lowercase = create_optimizer(
num_train_steps=lowercase__ , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=lowercase__ , metrics=["""accuracy"""] )
def decode_fn(lowercase__ ):
__lowercase = {
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(lowercase__ , lowercase__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
__lowercase = DataCollatorForLanguageModeling(
tokenizer=lowercase__ , mlm_probability=args.mlm_probability , mlm=lowercase__ , return_tensors="""tf""" )
def mask_with_collator(lowercase__ ):
# TF really needs an isin() function
__lowercase = (
~tf.cast(batch["""attention_mask"""] , tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
__lowercase , __lowercase = data_collator.tf_mask_tokens(
batch["""input_ids"""] , vocab_size=len(lowercase__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowercase__ , )
return batch
__lowercase = args.per_replica_batch_size * strategy.num_replicas_in_sync
__lowercase = prepare_dataset(
lowercase__ , decode_fn=lowercase__ , mask_fn=lowercase__ , batch_size=lowercase__ , shuffle=lowercase__ , shuffle_buffer_size=args.shuffle_buffer_size , )
__lowercase = prepare_dataset(
lowercase__ , decode_fn=lowercase__ , mask_fn=lowercase__ , batch_size=lowercase__ , shuffle=lowercase__ , )
__lowercase = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowercase__ ) )
model.fit(
lowercase__ , validation_data=lowercase__ , epochs=args.num_epochs , callbacks=lowercase__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
UpperCamelCase__ = parse_args()
main(args)
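# --- Example invocation (illustrative only; the bucket and directory names below are placeholders,
# not from the original script). Each flag maps to an argument defined in parse_args() above;
# --output_dir is the only required one.
#
#   python run_mlm.py \
#       --pretrained_model_config roberta-base \
#       --tokenizer unigram-tokenizer-wikitext \
#       --train_dataset gs://my-bucket/train/ \
#       --eval_dataset gs://my-bucket/eval/ \
#       --output_dir ./mlm-checkpoints \
#       --bfloat16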
| 634
|
def hamming_distance(string1: str, string2: str) -> int:
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
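    # Sanity checks (added for illustration; values follow from the definition above):
    # "karolin" and "kathrin" differ at positions 2, 3 and 4.
    assert hamming_distance("karolin", "kathrin") == 3
    assert hamming_distance("00000", "11111") == 5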
| 634
| 1
|
'''simple docstring'''
UpperCAmelCase_ : List[Any] = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
UpperCAmelCase_ : str = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
UpperCAmelCase_ : str = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
UpperCAmelCase_ : Optional[Any] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
UpperCAmelCase_ : int = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
UpperCAmelCase_ : Optional[int] = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
UpperCAmelCase_ : Optional[Any] = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
UpperCAmelCase_ : List[str] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 44
|
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
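    # Additional sanity check (added for illustration, not in the original):
    # duplicates land in the same bucket and are kept because each bucket is sorted with sorted().
    assert bucket_sort([5, 5, 3]) == [3, 5, 5]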
| 540
| 0
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 701
|
"""simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Calculate beta = velocity / c."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Calculate the Lorentz factor."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Build the Lorentz boost matrix along the x-axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    """Apply the Lorentz transformation to a four-vector (symbolic by default)."""
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29_979_245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'''\n{numerical_vector}''')
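    # Worked numeric example (added for illustration, not in the original): at half light speed
    # beta = 0.5, so the Lorentz factor is gamma = 1 / sqrt(1 - 0.25), about 1.1547.
    print(f"gamma(0.5c) = {gamma(0.5 * c)}")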
| 296
| 0
|
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}


class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids,
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
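# --- Illustrative usage of decode(..., truncate_before_pattern=...) (not part of the original file;
# the pattern list below is only an example, not a prescribed default). The patterns are regexes
# matched in MULTILINE mode; generation is cut at the first match, and repeated top-level
# `print`/`def` statements are trimmed to the first occurrence.
#
#   tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#   text = tokenizer.decode(generated_ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])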
| 111
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 417
| 0
|
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
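# --- Example invocation (illustrative only; the script filename, checkpoint and output paths below
# are placeholders, not from the original):
#
#   python run_tabfact_with_tapex.py \
#       --model_name_or_path microsoft/tapex-base \
#       --dataset_name tab_fact \
#       --do_train --do_eval \
#       --output_dir ./tapex-tabfact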
| 318
|
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")
    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
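# --- Example invocation (illustrative only; the paths below are placeholders):
#
#   python convert_checkpoint.py \
#       --tf_checkpoint_path ./tf_ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model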
| 318
| 1
|
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 9
|
import math
def proth(number: int) -> int:
    if not isinstance(number, int):
        message = f"Input value of [number={number}] must be an integer"
        raise TypeError(message)

    if number < 1:
        message = f"Input value of [number={number}] must be > 0"
        raise ValueError(message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
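    # Spot checks (added for illustration; the values follow from the block construction above,
    # where proth_list grows as [3, 5, 9, 13, 17, 25, ...]):
    assert proth(3) == 9
    assert proth(6) == 25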
| 393
| 0
|
def split(string: str, separator: str = " ") -> list:
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
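    # Examples (added for illustration): mirrors str.split for single-character separators.
    assert split("apple#banana#cherry", "#") == ["apple", "banana", "cherry"]
    assert split("Hello there") == ["Hello", "there"]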
| 707
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.')
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding
        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)
        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop('attention_mask')
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop('input_ids')
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
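# Minimal usage sketch (my example, assuming the public transformers API; the
# checkpoint name and file path are illustrative, not taken from this file):
#   from PIL import Image
#   from transformers import Pix2StructProcessor
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
#   inputs = processor(images=Image.open("chart.png"), text="Describe the chart", return_tensors="pt")
#   # -> batch with "flattened_patches", "attention_mask" and, for the text,
#   #    "decoder_input_ids" / "decoder_attention_mask" (renamed in __call__ above)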
| 444
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    '''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_vivit'''] = ['''VivitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vivit'''] = [
        '''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''VivitModel''',
        '''VivitPreTrainedModel''',
        '''VivitForVideoClassification''',
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 667
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ''
    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm')
    if "layers" in name:
        name = name.replace('layers', 'encoder.stages')
    if "residual_group.blocks" in name:
        name = name.replace('residual_group.blocks', 'layers')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "q_bias" in name:
        name = name.replace('q_bias', 'query.bias')
    if "k_bias" in name:
        name = name.replace('k_bias', 'key.bias')
    if "v_bias" in name:
        name = name.replace('v_bias', 'value.bias')
    if "cpb_mlp" in name:
        name = name.replace('cpb_mlp', 'continuous_position_bias_mlp')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'patch_embed.projection')
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "conv_first" in name:
        name = name.replace('conv_first', 'first_convolution')
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace('conv_last', 'final_convolution')
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace('conv_before_upsample.0', 'conv_before_upsample')
            if "upsample.0" in name:
                name = name.replace('upsample.0', 'upsample.convolution_0')
            if "upsample.2" in name:
                name = name.replace('upsample.2', 'upsample.convolution_1')
            name = 'upsample.' + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace('upsample.0.weight', 'upsample.conv.weight')
            name = name.replace('upsample.0.bias', 'upsample.conv.bias')
        else:
            pass
    else:
        name = 'swin2sr.' + name
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value tensors
            key_split = key.split('.')
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim
            if "weight" in key:
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight'] = val[:dim, :]
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight'] = val[dim : dim * 2, :]
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias'] = val[:dim]
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias'] = val[dim : dim * 2]
                orig_state_dict[f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val
    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    if len(missing_keys) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f'Unexpected key {key} in state_dict')
    # verify values
    url = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 126 if 'Jpeg' in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)
    outputs = model(pixel_values)
    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]])
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]])
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]])
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]])
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]])
    assert (
        outputs.reconstruction.shape == expected_shape
    ), f'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print('Looks ok!')
    url_to_name = {
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
            'swin2SR-classical-sr-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
            'swin2SR-classical-sr-x4-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
            'swin2SR-compressed-sr-x4-48'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
            'swin2SR-lightweight-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
            'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
        ),
    }
    model_name = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'Saving image processor to {pytorch_dump_folder_path}')
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub(f'caidas/{model_name}')
        processor.push_to_hub(f'caidas/{model_name}')
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
__UpperCamelCase : Optional[int] = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
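# Example invocation (illustrative; the script filename is an assumption):
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2sr-classical-sr-x2-64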
| 4
| 0
|
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
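# Note (my annotation): the last test enables use_karras_sigmas=True, which
# redistributes the sigma schedule following Karras et al. (2022); that is why
# its expected sum/mean differ sharply from the linear-beta runs above.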
| 712
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
UpperCamelCase_ = get_logger()
UpperCamelCase_ = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable with either `pickle` or `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using a global variable since `jaxlib.xla_extension.Device` is not serializable
        # with `pickle` or `dill`
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()
        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        # using a global variable since `jaxlib.xla_extension.Device` is not serializable
        # with `pickle` or `dill`
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
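# Minimal usage sketch (my example, assuming the public datasets API):
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
#   ds[0]["x"]  # -> jnp.ndarray, float32 by default (see _tensorize above)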
| 142
| 0
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None
SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Tuple = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE : List[str] = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
SCREAMING_SNAKE_CASE : str = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )
        return max_model_length
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
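# Illustrative note (mine, not from this file): with the default extra_ids=100,
# get_sentinel_tokens() returns the 100 tokens "<extra_id_0>" ... "<extra_id_99>"
# (as an unordered list), and get_sentinel_token_ids() maps them to vocab ids.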
| 419
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()
    def set_timesteps(self, state, num_inference_steps, shape=()):
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps, schedule=jnp.array(schedule, dtype=jnp.float32), timesteps=timesteps,
        )
    def add_noise_to_input(self, state, sample, sigma, key):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
| 681
| 0
|
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    data_files = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
    datasets = load_dataset('csv', data_files=data_files)
    label_list = datasets['train'].unique('label')
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['sentence1'], examples['sentence2'], truncation=True, max_length=None, padding='max_length')
        if "label" in examples:
            outputs['labels'] = [label_to_id[l] for l in examples['label']]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['sentence1', 'sentence2', 'label'], )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
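# Sketch of how these helpers are typically wired together (my example, assuming
# the standard accelerate API; `mocked_dataloaders` is the name used above):
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   train_dl, eval_dl = mocked_dataloaders(accelerator, batch_size=16)
#   train_dl, eval_dl = accelerator.prepare(train_dl, eval_dl)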
| 535
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 535
| 1
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
lowerCamelCase__ : List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
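# Example invocation (illustrative; paths are placeholders):
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bigbird_ckpt \
#       --big_bird_config_file ./config.json \
#       --pytorch_dump_path ./bigbird-pytorch \
#       --is_trivia_qa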
| 33
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
__UpperCAmelCase , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
lowerCAmelCase__ : str = text_generator('''This is a test''' , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
] , )
lowerCAmelCase__ : List[Any] = text_generator.model.config.eos_token_id
lowerCAmelCase__ : List[Any] = '''<pad>'''
lowerCAmelCase__ : List[Any] = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCAmelCase , )
self.assertEqual(
__UpperCAmelCase , [
[
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
],
[
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
],
] , )
@require_tf
def __magic_name__( self ):
lowerCAmelCase__ : int = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ : List[Any] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Dict = TextGenerationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
return text_generator, ["This is a test", "Another test"]
def __magic_name__( self ):
lowerCAmelCase__ : Any = '''Hello I believe in'''
lowerCAmelCase__ : List[Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase__ : Optional[int] = text_generator(__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , stop_sequence=''' fe''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe'''}] )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = text_generator.model
lowerCAmelCase__ : Optional[int] = text_generator.tokenizer
lowerCAmelCase__ : Tuple = text_generator('''This is a test''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ : Dict = pipeline(task='''text-generation''' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , return_full_text=__UpperCAmelCase )
lowerCAmelCase__ : Dict = text_generator('''This is a test''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ : List[str] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ : Optional[int] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCAmelCase__ : List[str] = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
] , )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Any = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_text=__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : str = text_generator('''test''' , return_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCAmelCase__ : str = text_generator('''''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCAmelCase__ : List[str] = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
lowerCAmelCase__ : Optional[Any] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(__UpperCAmelCase ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __magic_name__( self ):
import torch
# Classic `model_kwargs`
lowerCAmelCase__ : List[str] = pipeline(
            model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloat16} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
lowerCAmelCase__ : Any = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        lowerCAmelCase__ : Dict = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloat16 )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
lowerCAmelCase__ : Union[str, Any] = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCAmelCase__ : str = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
lowerCAmelCase__ : Any = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def __magic_name__( self ):
import torch
        lowerCAmelCase__ : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.float16 )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def __magic_name__( self ):
import torch
        lowerCAmelCase__ : Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.float16 )
pipe('''This is a test''' , do_sample=__UpperCAmelCase , top_p=0.5 )
def __magic_name__( self ):
lowerCAmelCase__ : int = '''Hello world'''
lowerCAmelCase__ : Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
lowerCAmelCase__ : List[Any] = logging.get_logger('''transformers.generation.tf_utils''' )
else:
lowerCAmelCase__ : Dict = logging.get_logger('''transformers.generation.utils''' )
        lowerCAmelCase__ : Optional[Any] = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , max_length=10 , max_new_tokens=1 )
self.assertIn(__UpperCAmelCase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : Any = text_generator(__UpperCAmelCase , max_new_tokens=1 )
self.assertNotIn(__UpperCAmelCase , cl.out )
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : Union[str, Any] = text_generator(__UpperCAmelCase , max_length=10 )
self.assertNotIn(__UpperCAmelCase , cl.out )
| 678
| 0
|
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))])
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 721
|
'''simple docstring'''
# Number of characters in the alphabet, used as the hash base
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
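# Why the O(1) rolling update works (my annotation): each window is hashed as a
# base-`alphabet_size` number mod `modulus`; since
# modulus_power == alphabet_size**(p_len - 1) % modulus, sliding one position is
#   new = ((old - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])) % modulus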
| 276
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
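# Minimal usage sketch (my example, assuming the standard transformers API):
#   from transformers import DecisionTransformerConfig, DecisionTransformerModel
#   config = DecisionTransformerConfig(state_dim=17, act_dim=4)
#   model = DecisionTransformerModel(config)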
| 519
|
from __future__ import annotations
from typing import Any
class Matrix:
    """A 2-D matrix with a default fill value."""

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]
def __str__( self ) -> str:
a__ = f"Matrix consist of {self.row} rows and {self.column} columns\n"
# Make string identifier
a__ = 0
for row_vector in self.array:
for obj in row_vector:
a__ = max(SCREAMING_SNAKE_CASE , len(str(SCREAMING_SNAKE_CASE ) ) )
a__ = f"%{max_element_length}s"
# Make string and return
def single_line(SCREAMING_SNAKE_CASE ) -> str:
nonlocal string_format_identifier
a__ = '''['''
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(SCREAMING_SNAKE_CASE ) for row_vector in self.array )
return s
def __repr__( self ) -> str:
return str(self )
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> bool:
if not (isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and len(SCREAMING_SNAKE_CASE ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self , SCREAMING_SNAKE_CASE ) -> Any:
assert self.validate_indicies(SCREAMING_SNAKE_CASE )
return self.array[loc[0]][loc[1]]
def __setitem__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None:
assert self.validate_indicies(SCREAMING_SNAKE_CASE )
a__ = value
def __add__( self , SCREAMING_SNAKE_CASE ) -> Matrix:
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert self.row == another.row and self.column == another.column
# Add
a__ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
a__ = self[r, c] + another[r, c]
return result
def __neg__( self ) -> Matrix:
a__ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
a__ = -self[r, c]
return result
def __sub__( self , SCREAMING_SNAKE_CASE ) -> Matrix:
return self + (-another)
def __mul__( self , SCREAMING_SNAKE_CASE ) -> Matrix:
if isinstance(SCREAMING_SNAKE_CASE , (int, float) ): # Scalar multiplication
a__ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
a__ = self[r, c] * another
return result
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): # Matrix multiplication
assert self.column == another.row
a__ = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
a__ = f"Unsupported type given for another ({type(SCREAMING_SNAKE_CASE )})"
raise TypeError(SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self ) -> Matrix:
a__ = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
a__ = self[r, c]
return result
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
a__ = v.transpose()
a__ = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
            return None  # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def __a ( ):
# a^(-1)
a__ = Matrix(3 , 3 , 0 )
for i in range(3 ):
a__ = 1
print(f"a^(-1) is {ainv}" )
# u, v
a__ = Matrix(3 , 1 , 0 )
a__ , a__ , a__ = 1, 2, -3
a__ = Matrix(3 , 1 , 0 )
a__ , a__ , a__ = 4, -2, 5
print(f"u is {u}" )
print(f"v is {v}" )
print(f"uv^T is {u * v.transpose()}" )
# Sherman Morrison
print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(__UpperCAmelCase , __UpperCAmelCase )}" )
def __a ( ):
import doctest
doctest.testmod()
testa()
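

# Hedged verification sketch for the Sherman-Morrison update: with
# A = A^(-1) = I and a rank-one update u v^T, multiplying (I + u v^T) by the
# returned matrix should come back to the identity. Method names follow the
# demo above (``transpose``/``sherman_morrison``), which this file assumes.
def _check_sherman_morrison() -> None:
    ainv = Matrix(3, 3, 0)
    for i in range(3):
        ainv[i, i] = 1  # identity, so A^(-1) = A = I
    u = Matrix(3, 1, 0)
    v = Matrix(3, 1, 0)
    u[0, 0], v[1, 0] = 1, 1  # rank-one update E = u v^T
    result = ainv.sherman_morrison(u, v)
    print((ainv + u * v.transpose()) * result)  # expect the 3x3 identity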
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Dict = logging.get_logger(__name__)
snake_case__ : int = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class _a ( _A ):
"""simple docstring"""
snake_case ="""open-llama"""
def __init__( self , _snake_case=10_0000 , _snake_case=4096 , _snake_case=1_1008 , _snake_case=32 , _snake_case=32 , _snake_case="silu" , _snake_case=2048 , _snake_case=0.02 , _snake_case=1E-6 , _snake_case=True , _snake_case=0 , _snake_case=1 , _snake_case=2 , _snake_case=False , _snake_case=True , _snake_case=0.1 , _snake_case=0.1 , _snake_case=True , _snake_case=True , _snake_case=None , **_snake_case , ):
_UpperCAmelCase =vocab_size
_UpperCAmelCase =max_position_embeddings
_UpperCAmelCase =hidden_size
_UpperCAmelCase =intermediate_size
_UpperCAmelCase =num_hidden_layers
_UpperCAmelCase =num_attention_heads
_UpperCAmelCase =hidden_act
_UpperCAmelCase =initializer_range
_UpperCAmelCase =rms_norm_eps
_UpperCAmelCase =use_cache
_UpperCAmelCase =kwargs.pop(
"use_memorry_efficient_attention" , __lowerCamelCase )
_UpperCAmelCase =hidden_dropout_prob
_UpperCAmelCase =attention_dropout_prob
_UpperCAmelCase =use_stable_embedding
_UpperCAmelCase =shared_input_output_embedding
_UpperCAmelCase =rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , tie_word_embeddings=__lowerCamelCase , **__lowerCamelCase , )
def SCREAMING_SNAKE_CASE ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __lowerCamelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
F"got {self.rope_scaling}" )
_UpperCAmelCase =self.rope_scaling.get("type" , __lowerCamelCase )
_UpperCAmelCase =self.rope_scaling.get("factor" , __lowerCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(__lowerCamelCase , __lowerCamelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
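

# Hedged usage sketch: when the transformers/torch requirement above is met the
# real pipelines import; otherwise the dummy objects raise a clear error at
# instantiation time. Typical downstream use (checkpoint id illustrative):
#
#     from diffusers import UnCLIPPipeline
#     pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha")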
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
"""simple docstring"""
import requests
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = {"""Content-Type""": """application/json"""}
UpperCAmelCase__ : Optional[Any] = requests.post(__UpperCamelCase , json={"""text""": message_body} , headers=__UpperCamelCase )
if response.status_code != 200:
UpperCAmelCase__ : Any = (
"""Request to slack returned an error """
F"{response.status_code}, the response is:\n{response.text}"
)
raise ValueError(__UpperCamelCase )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
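

# Hedged note: the requests.post() call above is equivalent to
#
#     curl -X POST -H "Content-Type: application/json" \
#          -d '{"text": "<YOUR MESSAGE BODY>"}' <SLACK CHANNEL URL>
#
# and any non-200 status surfaces as a ValueError that includes the response body.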
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : str, lowerCamelCase : Any, lowerCamelCase : Tuple )-> None:
if len(lowercase__ ) != degree + 1:
raise ValueError(
'''The number of coefficients should be equal to the degree + 1.''' )
lowerCamelCase__ : list[float] =list(lowercase__ )
lowerCamelCase__ : Optional[int] =degree
def __add__( self : List[str], lowerCamelCase : Tuple )-> Polynomial:
if self.degree > polynomial_a.degree:
lowerCamelCase__ : Optional[Any] =self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree, lowercase__ )
else:
lowerCamelCase__ : Any =polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree, lowercase__ )
def __sub__( self : str, lowerCamelCase : Optional[Any] )-> Polynomial:
return self + polynomial_a * Polynomial(0, [-1] )
def __neg__( self : Any )-> Polynomial:
return Polynomial(self.degree, [-c for c in self.coefficients] )
def __mul__( self : str, lowerCamelCase : List[Any] )-> Polynomial:
lowerCamelCase__ : list[float] =[0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree, lowercase__ )
def snake_case ( self : str, lowerCamelCase : str )-> int | float:
lowerCamelCase__ : int | float =0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : str )-> str:
lowerCamelCase__ : str =''''''
for i in range(self.degree, -1, -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(lowercase__ )
return polynomial
def __repr__( self : Union[str, Any] )-> str:
return self.__str__()
def snake_case ( self : Union[str, Any] )-> Polynomial:
lowerCamelCase__ : list[float] =[0] * self.degree
for i in range(self.degree ):
lowerCamelCase__ : Optional[Any] =self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1, lowercase__ )
def snake_case ( self : Union[str, Any], lowerCamelCase : str = 0 )-> Polynomial:
lowerCamelCase__ : list[float] =[0] * (self.degree + 2)
lowerCamelCase__ : Union[str, Any] =constant
for i in range(self.degree + 1 ):
lowerCamelCase__ : List[Any] =self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1, lowercase__ )
def __eq__( self : Any, lowerCamelCase : Optional[int] )-> bool:
if not isinstance(lowercase__, lowercase__ ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : List[str], lowerCamelCase : str )-> bool:
return not self.__eq__(lowercase__ )
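

# Hedged usage sketch (constructor published upstream as
# ``Polynomial(degree, coefficients)`` with coefficients ordered from x^0 up):
#
#     p = Polynomial(1, [1, 2])     # 2x + 1
#     q = Polynomial(2, [0, 0, 3])  # 3x^2
#     print(p + q)                  # 3x^2 + 2x + 1
#     print(p * q)                  # 6x^3 + 3x^2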
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : list[int] ):
"""simple docstring"""
if not numbers:
return 0
if not isinstance(__lowerCamelCase , (list, tuple) ) or not all(
isinstance(__lowerCamelCase , __lowerCamelCase ) for number in numbers ):
raise ValueError('''numbers must be an iterable of integers''' )
lowerCamelCase__ : Any =numbers[0]
for i in range(1 , len(__lowerCamelCase ) ):
# update the maximum and minimum subarray products
lowerCamelCase__ : Dict =numbers[i]
if number < 0:
lowerCamelCase__ , lowerCamelCase__ : List[Any] =min_till_now, max_till_now
lowerCamelCase__ : Optional[int] =max(__lowerCamelCase , max_till_now * number )
lowerCamelCase__ : Dict =min(__lowerCamelCase , min_till_now * number )
# update the maximum product found till now
lowerCamelCase__ : Tuple =max(__lowerCamelCase , __lowerCamelCase )
return max_prod
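

# Hedged worked example: for [2, 3, -2, 4] the answer is 6 (subarray [2, 3]).
# Tracking the running minimum alongside the maximum is what lets a later
# negative number flip a large negative product positive, e.g. [-4, -3] -> 12.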
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_abit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
a = logging.getLogger(__name__)
def _snake_case ( _snake_case : torch.nn.Module , _snake_case : BnbQuantizationConfig , _snake_case : Union[str, os.PathLike] = None , _snake_case : Optional[Dict[str, Union[int, str, torch.device]]] = None , _snake_case : Optional[List[str]] = None , _snake_case : Optional[Dict[Union[int, str], Union[int, str]]] = None , _snake_case : Optional[Union[str, os.PathLike]] = None , _snake_case : bool = False , ) -> Any:
'''simple docstring'''
_A = bnb_quantization_config.load_in_abit
_A = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
' make sure you have the latest version of `bitsandbytes` installed.' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
_A = []
# custom device map
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(device_map.keys() ) > 1:
_A = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
_A = get_keys_to_not_convert(lowerCamelCase__ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(lowerCamelCase__ )
_A = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
_A = []
_A = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(lowerCamelCase__ )
# compatibility with peft
_A = load_in_abit
_A = load_in_abit
_A = get_parameter_device(lowerCamelCase__ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
_A = replace_with_bnb_layers(lowerCamelCase__ , lowerCamelCase__ , modules_to_not_convert=lowerCamelCase__ )
# convert param to the right dtype
_A = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
_A = name.replace('.weight' , '' ).replace('.bias' , '' )
_A = getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(lowerCamelCase__ ):
param.to(lowerCamelCase__ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
                F'''The model device type is {model_device.type}. However, cuda is needed for quantization. '''
                'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
_A = replace_with_bnb_layers(
lowerCamelCase__ , lowerCamelCase__ , modules_to_not_convert=lowerCamelCase__ )
_A = get_quantized_model_device_map(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , max_memory=lowerCamelCase__ , no_split_module_classes=lowerCamelCase__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
_A = True
_A = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=lowerCamelCase__ , offload_state_dict=lowerCamelCase__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(lowerCamelCase__ , device_map=lowerCamelCase__ , offload_dir=lowerCamelCase__ )
def _snake_case ( _snake_case : Dict , _snake_case : int , _snake_case : List[str]=None , _snake_case : int=None , _snake_case : Any=None ) -> Any:
'''simple docstring'''
if device_map is None:
if torch.cuda.is_available():
_A = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info('The device_map was not initialized. ' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
_A = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
_A = {}
_A = special_dtypes
_A = no_split_module_classes
_A = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
_A = get_balanced_memory(
lowerCamelCase__ , low_zero=(device_map == 'balanced_low_0') , max_memory=lowerCamelCase__ , **lowerCamelCase__ , )
_A = max_memory
_A = infer_auto_device_map(lowerCamelCase__ , **lowerCamelCase__ )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
# check if don't have any quantized module on the cpu
_A = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
_A = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
logger.info(
                        'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
def _snake_case ( _snake_case : Dict , _snake_case : List[Any] , _snake_case : Any=None , _snake_case : List[Any]=None ) -> Dict:
'''simple docstring'''
if modules_to_not_convert is None:
_A = []
_A = _replace_with_bnb_layers(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def _snake_case ( _snake_case : str , _snake_case : Union[str, Any] , _snake_case : Tuple=None , _snake_case : int=None , ) -> List[str]:
'''simple docstring'''
_A = False
for name, module in model.named_children():
if current_key_name is None:
_A = []
current_key_name.append(lowerCamelCase__ )
if isinstance(lowerCamelCase__ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
_A = """.""".join(lowerCamelCase__ )
_A = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
_A = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
_A = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=lowerCamelCase__ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
_A = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
_A = module.weight.data
if module.bias is not None:
_A = module.bias.data
bnb_module.requires_grad_(lowerCamelCase__ )
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_A = True
if len(list(module.children() ) ) > 0:
_A = _replace_with_bnb_layers(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_A = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _snake_case ( _snake_case : Any ) -> List[str]:
'''simple docstring'''
with init_empty_weights():
_A = deepcopy(lowerCamelCase__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
_A = find_tied_parameters(lowerCamelCase__ )
# For compatibility with Accelerate < 0.18
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_A = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
_A = sum(lowerCamelCase__ , [] )
_A = len(lowerCamelCase__ ) > 0
# Check if it is a base model
_A = False
if hasattr(lowerCamelCase__ , 'base_model_prefix' ):
_A = not hasattr(lowerCamelCase__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
_A = list(model.named_children() )
_A = [list_modules[-1][0]]
# add last module together with tied weights
_A = set(lowerCamelCase__ ) - set(lowerCamelCase__ )
_A = list(set(lowerCamelCase__ ) ) + list(lowerCamelCase__ )
# remove ".weight" from the keys
_A = [""".weight""", """.bias"""]
_A = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
_A = name.replace(lowerCamelCase__ , '' )
filtered_module_names.append(lowerCamelCase__ )
return filtered_module_names
def _snake_case ( _snake_case : Dict ) -> int:
'''simple docstring'''
for m in model.modules():
if isinstance(lowerCamelCase__ , bnb.nn.Linearabit ):
return True
return False
def _snake_case ( _snake_case : nn.Module ) -> int:
'''simple docstring'''
return next(parameter.parameters() ).device
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : str , _snake_case : str , _snake_case : Optional[Any] , _snake_case : Tuple , _snake_case : Any ) -> List[str]:
'''simple docstring'''
if fpaa_statistics is None:
set_module_tensor_to_device(lowerCamelCase__ , lowerCamelCase__ , 0 , dtype=lowerCamelCase__ , value=lowerCamelCase__ )
_A = param_name
_A = model
if "." in tensor_name:
_A = tensor_name.split('.' )
for split in splits[:-1]:
_A = getattr(lowerCamelCase__ , lowerCamelCase__ )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
_A = new_module
_A = splits[-1]
# offload weights
_A = False
offload_weight(module._parameters[tensor_name] , lowerCamelCase__ , lowerCamelCase__ , index=lowerCamelCase__ )
if hasattr(module._parameters[tensor_name] , 'SCB' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , lowerCamelCase__ , index=lowerCamelCase__ , )
else:
offload_weight(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , index=lowerCamelCase__ )
offload_weight(lowerCamelCase__ , param_name.replace('weight' , 'SCB' ) , lowerCamelCase__ , index=lowerCamelCase__ )
set_module_tensor_to_device(lowerCamelCase__ , lowerCamelCase__ , 'meta' , dtype=lowerCamelCase__ , value=torch.empty(*param.size() ) )
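

# Hedged usage sketch mirroring the accelerate API this module implements
# (names exist upstream; the weights path is a placeholder):
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     quantized = load_and_quantize_model(
#         empty_model, bnb_config, weights_location="/path/to/checkpoint"
#     )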
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__( self ) -> Tuple:
lowercase__ : str = tempfile.mkdtemp()
lowercase__ : List[Any] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowercase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowercase__ : Optional[Any] = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
lowercase__ : Any = os.path.join(self.tmpdirname , lowerCamelCase__ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__( self , **lowerCamelCase__ ) -> str:
return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def UpperCAmelCase__( self , **lowerCamelCase__ ) -> str:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def UpperCAmelCase__( self , **lowerCamelCase__ ) -> List[Any]:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def UpperCAmelCase__( self ) -> Any:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__( self ) -> Optional[int]:
lowercase__ : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase__ : Any = [Image.fromarray(np.moveaxis(lowerCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__( self ) -> int:
lowercase__ : Union[str, Any] = self.get_tokenizer()
lowercase__ : Union[str, Any] = self.get_rust_tokenizer()
lowercase__ : Any = self.get_image_processor()
lowercase__ : Union[str, Any] = AlignProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ : Tuple = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase__ )
lowercase__ : List[Any] = AlignProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ : str = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCamelCase__ )
self.assertIsInstance(processor_fast.image_processor , lowerCamelCase__ )
def UpperCAmelCase__( self ) -> int:
lowercase__ : Optional[int] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase__ : Any = self.get_image_processor(do_normalize=lowerCamelCase__ , padding_value=1.0 )
lowercase__ : Union[str, Any] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase__ )
def UpperCAmelCase__( self ) -> Tuple:
lowercase__ : Optional[int] = self.get_image_processor()
lowercase__ : Dict = self.get_tokenizer()
lowercase__ : Tuple = AlignProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
lowercase__ : Optional[int] = self.prepare_image_inputs()
lowercase__ : Tuple = image_processor(lowerCamelCase__ , return_tensors="""np""" )
lowercase__ : str = processor(images=lowerCamelCase__ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__( self ) -> List[str]:
lowercase__ : Union[str, Any] = self.get_image_processor()
lowercase__ : Tuple = self.get_tokenizer()
lowercase__ : List[str] = AlignProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
lowercase__ : Any = """lower newer"""
lowercase__ : List[Any] = processor(text=lowerCamelCase__ )
lowercase__ : Tuple = tokenizer(lowerCamelCase__ , padding="""max_length""" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__( self ) -> str:
lowercase__ : List[Any] = self.get_image_processor()
lowercase__ : Any = self.get_tokenizer()
lowercase__ : Union[str, Any] = AlignProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
lowercase__ : List[Any] = """lower newer"""
lowercase__ : List[str] = self.prepare_image_inputs()
lowercase__ : List[Any] = processor(text=lowerCamelCase__ , images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def UpperCAmelCase__( self ) -> Union[str, Any]:
lowercase__ : int = self.get_image_processor()
lowercase__ : Union[str, Any] = self.get_tokenizer()
lowercase__ : Tuple = AlignProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
lowercase__ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ : Tuple = processor.batch_decode(lowerCamelCase__ )
lowercase__ : Optional[Any] = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__( self ) -> List[Any]:
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : Optional[int] = AlignProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
lowercase__ : str = """lower newer"""
lowercase__ : List[Any] = self.prepare_image_inputs()
lowercase__ : int = processor(text=lowerCamelCase__ , images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
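

# Hedged usage sketch of the processor under test: a single call tokenizes text
# and preprocesses images into one batch dict.
#
#     processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
#     batch = processor(text="two cats", images=image, return_tensors="pt")
#     # batch keys: input_ids, token_type_ids, attention_mask, pixel_values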
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
a : Tuple = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
a : Tuple = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
a : List[str] = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
a : Tuple = F'''down_blocks.{i}.resnets.{j}.'''
a : Optional[int] = F'''input_blocks.{3*i + j + 1}.0.'''
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
a : Dict = F'''down_blocks.{i}.attentions.{j}.'''
a : int = F'''input_blocks.{3*i + j + 1}.1.'''
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
a : Dict = F'''up_blocks.{i}.resnets.{j}.'''
a : int = F'''output_blocks.{3*i + j}.0.'''
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
a : Any = F'''up_blocks.{i}.attentions.{j}.'''
a : Optional[int] = F'''output_blocks.{3*i + j}.1.'''
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
a : Dict = F'''down_blocks.{i}.downsamplers.0.conv.'''
a : int = F'''input_blocks.{3*(i+1)}.0.op.'''
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
a : Tuple = F'''up_blocks.{i}.upsamplers.0.'''
a : Optional[Any] = F'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'''
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
a : Optional[Any] = '''mid_block.attentions.0.'''
a : Optional[int] = '''middle_block.1.'''
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
a : List[str] = F'''mid_block.resnets.{j}.'''
a : Optional[Any] = F'''middle_block.{2*j}.'''
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] ) ->Any:
'''simple docstring'''
a : Optional[Any] = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
a : str = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
a : str = v.replace(_lowercase , _lowercase )
a : Tuple = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
a : Any = v.replace(_lowercase , _lowercase )
a : Any = v
a : Any = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
a : Any = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
a : List[str] = F'''encoder.down_blocks.{i}.resnets.{j}.'''
a : Dict = F'''encoder.down.{i}.block.{j}.'''
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
a : int = F'''down_blocks.{i}.downsamplers.0.'''
a : List[Any] = F'''down.{i}.downsample.'''
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
a : Tuple = F'''up_blocks.{i}.upsamplers.0.'''
a : List[str] = F'''up.{3-i}.upsample.'''
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
a : Union[str, Any] = F'''decoder.up_blocks.{i}.resnets.{j}.'''
a : Optional[Any] = F'''decoder.up.{3-i}.block.{j}.'''
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
a : Dict = F'''mid_block.resnets.{i}.'''
a : Tuple = F'''mid.block_{i+1}.'''
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
a : Optional[int] = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def _SCREAMING_SNAKE_CASE ( _lowercase : Tuple ) ->int:
'''simple docstring'''
return w.reshape(*w.shape , 1 , 1 )
def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
a : Optional[int] = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
a : Tuple = v.replace(_lowercase , _lowercase )
a : int = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
a : int = v.replace(_lowercase , _lowercase )
a : int = v
a : Dict = {v: vae_state_dict[k] for k, v in mapping.items()}
a : int = ["q", "k", "v", "proj_out"]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if F"""mid.attn_1.{weight_name}.weight""" in k:
print(F"""Reshaping {k} for SD format""" )
a : Optional[int] = reshape_weight_for_sd(_lowercase )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
a : Optional[Any] = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
a : Dict = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
a : str = re.compile('''|'''.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
a : Optional[int] = {'''q''': 0, '''k''': 1, '''v''': 2}
def _SCREAMING_SNAKE_CASE ( _lowercase : int ) ->Union[str, Any]:
'''simple docstring'''
a : int = {}
a : Optional[Any] = {}
a : Dict = {}
for k, v in text_enc_dict.items():
if (
k.endswith(".self_attn.q_proj.weight" )
or k.endswith(".self_attn.k_proj.weight" )
or k.endswith(".self_attn.v_proj.weight" )
):
a : Dict = k[: -len(".q_proj.weight" )]
a : str = k[-len("q_proj.weight" )]
if k_pre not in capture_qkv_weight:
a : List[Any] = [None, None, None]
a : Union[str, Any] = v
continue
if (
k.endswith(".self_attn.q_proj.bias" )
or k.endswith(".self_attn.k_proj.bias" )
or k.endswith(".self_attn.v_proj.bias" )
):
a : str = k[: -len(".q_proj.bias" )]
a : Tuple = k[-len("q_proj.bias" )]
if k_pre not in capture_qkv_bias:
a : List[Any] = [None, None, None]
a : str = v
continue
        a : str = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , _lowercase )
a : Any = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        a : str = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , _lowercase )
a : Dict = torch.cat(_lowercase )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        a : Optional[Any] = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , _lowercase )
a : str = torch.cat(_lowercase )
return new_state_dict
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[int] ) ->Optional[int]:
'''simple docstring'''
return text_enc_dict
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
)
a : List[str] = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
a : Any = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
a : Union[str, Any] = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
a : Dict = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
a : Dict = load_file(unet_path, device='''cpu''')
else:
a : Optional[Any] = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
a : int = torch.load(unet_path, map_location='''cpu''')
if osp.exists(vae_path):
a : Dict = load_file(vae_path, device='''cpu''')
else:
a : Any = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
a : str = torch.load(vae_path, map_location='''cpu''')
if osp.exists(text_enc_path):
a : str = load_file(text_enc_path, device='''cpu''')
else:
a : Union[str, Any] = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
a : Dict = torch.load(text_enc_path, map_location='''cpu''')
# Convert the UNet model
a : Any = convert_unet_state_dict(unet_state_dict)
a : List[Any] = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
a : Optional[int] = convert_vae_state_dict(vae_state_dict)
a : Tuple = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
a : Optional[int] = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
a : int = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
a : List[Any] = convert_text_enc_state_dict_vaa(text_enc_dict)
a : Optional[int] = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
else:
a : str = convert_text_enc_state_dict(text_enc_dict)
a : List[Any] = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
a : Any = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
a : Optional[Any] = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
a : str = {'''state_dict''': state_dict}
torch.save(state_dict, args.checkpoint_path)
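

# Hedged invocation sketch (script filename and paths are placeholders):
#
#     python convert_diffusers_to_sd.py \
#         --model_path ./my-diffusers-model \
#         --checkpoint_path ./model.safetensors \
#         --half --use_safetensors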
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
a : Any = logging.get_logger(__name__)
a : Tuple = {
'''tensor(bool)''': np.bool_,
'''tensor(int8)''': np.inta,
'''tensor(uint8)''': np.uinta,
'''tensor(int16)''': np.intaa,
'''tensor(uint16)''': np.uintaa,
'''tensor(int32)''': np.intaa,
'''tensor(uint32)''': np.uintaa,
'''tensor(int64)''': np.intaa,
'''tensor(uint64)''': np.uintaa,
'''tensor(float16)''': np.floataa,
'''tensor(float)''': np.floataa,
'''tensor(double)''': np.floataa,
}
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> str:
logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
a : Optional[int] = model
a : int = kwargs.get("model_save_dir" , lowerCAmelCase__ )
a : Tuple = kwargs.get("latest_model_name" , lowerCAmelCase__ )
def __call__( self , **lowerCAmelCase__ ) -> Dict:
a : List[str] = {k: np.array(lowerCAmelCase__ ) for k, v in kwargs.items()}
return self.model.run(lowerCAmelCase__ , lowerCAmelCase__ )
@staticmethod
def __a ( lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None ) -> Union[str, Any]:
if provider is None:
logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
a : List[str] = "CPUExecutionProvider"
return ort.InferenceSession(lowerCAmelCase__ , providers=[provider] , sess_options=lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ ) -> int:
a : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
a : Optional[int] = self.model_save_dir.joinpath(self.latest_model_name )
a : List[str] = Path(lowerCAmelCase__ ).joinpath(lowerCAmelCase__ )
try:
shutil.copyfile(lowerCAmelCase__ , lowerCAmelCase__ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
a : str = self.model_save_dir.joinpath(lowerCAmelCase__ )
if src_path.exists():
a : Any = Path(lowerCAmelCase__ ).joinpath(lowerCAmelCase__ )
try:
shutil.copyfile(lowerCAmelCase__ , lowerCAmelCase__ )
except shutil.SameFileError:
pass
def __a ( self , lowerCAmelCase__ , **lowerCAmelCase__ , ) -> str:
if os.path.isfile(lowerCAmelCase__ ):
logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
# saving model weights/files
self._save_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
@classmethod
def __a ( cls , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Optional[int]:
a : Tuple = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(lowerCAmelCase__ ):
a : Tuple = OnnxRuntimeModel.load_model(
os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , provider=lowerCAmelCase__ , sess_options=lowerCAmelCase__ )
a : Tuple = Path(lowerCAmelCase__ )
# load model from hub
else:
# download model
a : Optional[Any] = hf_hub_download(
repo_id=lowerCAmelCase__ , filename=lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ , revision=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , force_download=lowerCAmelCase__ , )
a : Optional[int] = Path(lowerCAmelCase__ ).parent
a : List[Any] = Path(lowerCAmelCase__ ).name
a : int = OnnxRuntimeModel.load_model(lowerCAmelCase__ , provider=lowerCAmelCase__ , sess_options=lowerCAmelCase__ )
return cls(model=lowerCAmelCase__ , **lowerCAmelCase__ )
@classmethod
def __a ( cls , lowerCAmelCase__ , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> List[str]:
a : Any = None
if len(str(lowerCAmelCase__ ).split("@" ) ) == 2:
a, a : Tuple = model_id.split("@" )
return cls._from_pretrained(
model_id=lowerCAmelCase__ , revision=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , force_download=lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
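

# Hedged usage sketch (class published upstream as ``OnnxRuntimeModel``; the
# repo id is illustrative). The ``model_id@revision`` shorthand is split off in
# the classmethod above before delegating to ``_from_pretrained``:
#
#     model = OnnxRuntimeModel.from_pretrained(
#         "some-org/sd-unet-onnx@main", file_name="model.onnx",
#         provider="CPUExecutionProvider",
#     )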
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__UpperCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class __lowercase :
snake_case_ = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
snake_case_ = field(
default=__lowerCamelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
snake_case_ = field(
default=__lowerCamelCase , metadata={"""help""": """The column name of the images in the files."""} )
snake_case_ = field(default=__lowerCamelCase , metadata={"""help""": """A folder containing the training data."""} )
snake_case_ = field(default=__lowerCamelCase , metadata={"""help""": """A folder containing the validation data."""} )
snake_case_ = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
snake_case_ = field(
default=__lowerCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
snake_case_ = field(
default=__lowerCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = {}
if self.train_dir is not None:
UpperCAmelCase__ : str = self.train_dir
if self.validation_dir is not None:
UpperCAmelCase__ : int = self.validation_dir
UpperCAmelCase__ : int = data_files if data_files else None
@dataclass
class __lowercase :
snake_case_ = field(
default=__lowerCamelCase , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
snake_case_ = field(
default=__lowerCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
snake_case_ = field(
default=__lowerCamelCase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
snake_case_ = field(
default=__lowerCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
snake_case_ = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
snake_case_ = field(default=__lowerCamelCase , metadata={"""help""": """Name or path of preprocessor config."""} )
snake_case_ = field(
default=__lowerCamelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
snake_case_ = field(
default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
snake_case_ = field(
default=__lowerCamelCase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class __lowercase ( __lowerCamelCase ):
snake_case_ = field(
default=1e-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = torch.stack([example["""pixel_values"""] for example in examples] )
return {"pixel_values": pixel_values}
def lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , __UpperCamelCase , __UpperCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase__ : List[Any] = training_args.get_process_log_level()
logger.setLevel(__UpperCamelCase )
transformers.utils.logging.set_verbosity(__UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
UpperCAmelCase__ : str = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase__ : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
UpperCAmelCase__ : int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCAmelCase__ : List[Any] = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __UpperCamelCase ) and data_args.train_val_split > 0.0:
UpperCAmelCase__ : List[Any] = ds["""train"""].train_test_split(data_args.train_val_split )
UpperCAmelCase__ : Optional[Any] = split["""train"""]
UpperCAmelCase__ : Tuple = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase__ : Optional[Any] = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCAmelCase__ : int = ViTMAEConfig.from_pretrained(model_args.config_name , **__UpperCamelCase )
elif model_args.model_name_or_path:
UpperCAmelCase__ : List[str] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__UpperCamelCase )
else:
UpperCAmelCase__ : List[str] = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
UpperCAmelCase__ : Any = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__UpperCamelCase )
elif model_args.model_name_or_path:
UpperCAmelCase__ : Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__UpperCamelCase )
else:
UpperCAmelCase__ : Dict = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
UpperCAmelCase__ : Tuple = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
UpperCAmelCase__ : Any = ViTMAEForPreTraining(__UpperCamelCase )
if training_args.do_train:
UpperCAmelCase__ : int = ds["""train"""].column_names
else:
UpperCAmelCase__ : int = ds["""validation"""].column_names
if data_args.image_column_name is not None:
UpperCAmelCase__ : Union[str, Any] = data_args.image_column_name
elif "image" in column_names:
UpperCAmelCase__ : Optional[Any] = """image"""
elif "img" in column_names:
UpperCAmelCase__ : Dict = """img"""
else:
UpperCAmelCase__ : Dict = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCAmelCase__ : Optional[int] = image_processor.size["""shortest_edge"""]
else:
UpperCAmelCase__ : List[str] = (image_processor.size["""height"""], image_processor.size["""width"""])
UpperCAmelCase__ : Dict = Compose(
[
Lambda(lambda __UpperCamelCase : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(__UpperCamelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(__UpperCamelCase ):
UpperCAmelCase__ : Optional[int] = [transforms(__UpperCamelCase ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
UpperCAmelCase__ : Dict = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(__UpperCamelCase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
UpperCAmelCase__ : str = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(__UpperCamelCase )
# Compute absolute learning rate
UpperCAmelCase__ : List[Any] = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
UpperCAmelCase__ : str = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
UpperCAmelCase__ : int = Trainer(
model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__UpperCamelCase , data_collator=__UpperCamelCase , )
# Training
if training_args.do_train:
UpperCAmelCase__ : List[str] = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase__ : Optional[int] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase__ : Union[str, Any] = last_checkpoint
UpperCAmelCase__ : Dict = trainer.train(resume_from_checkpoint=__UpperCamelCase )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCAmelCase__ : Any = trainer.evaluate()
trainer.log_metrics("""eval""" , __UpperCamelCase )
trainer.save_metrics("""eval""" , __UpperCamelCase )
# Write model card and (optionally) push to hub
UpperCAmelCase__ : Union[str, Any] = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCamelCase )
else:
trainer.create_model_card(**__UpperCamelCase )
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
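# A minimal launch sketch (illustrative only: the dataset name and output path
# are placeholders, and the flags assume the argument dataclasses parsed above):
#
#   python run_mae.py --dataset_name cifar10 --output_dir ./vit-mae-demo \
#       --do_train --do_eval --base_learning_rate 1.5e-4 --mask_ratio 0.75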
| 65
|
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
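# Why the rename above: DialoGPT's fine-tuned pickles store the tied LM head
# under "lm_head.decoder.weight", while transformers' GPT-2 class expects
# "lm_head.weight". A quick sanity check of a converted folder (hypothetical
# path, assuming the loop above has run):
#
#   d = torch.load("./DialoGPT-small/pytorch_model.bin")
#   assert "lm_head.weight" in d and "lm_head.decoder.weight" not in d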
| 495
| 0
|
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 481
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        self.size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Computes the expected height and width for a (batch of) input image(s)."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 481
| 1
|
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
| 95
|
def binomial_coefficient(n: int, r: int) -> int:
    """Computes nCr using a single row of Pascal's triangle."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
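# The loop fills one row of Pascal's triangle in place, so nCr is computed in
# O(n * r) time and O(r) extra space. Illustrative check:
assert binomial_coefficient(n=10, r=5) == 252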
| 105
| 0
|
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Find scores of each token being the start and end token for an entity."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
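# Sketch of the expected inputs (illustrative; keys match forward() above):
# W_query is a BatchEncoding for the query sentences, and W_supports is a
# BatchEncoding for the concatenated support examples, augmented with three
# extra tensors: "sizes" (number of supports per query), "start_token_id" and
# "end_token_id" (ids of the entity-marker tokens). The model returns, per
# query, the probability of each support token being an entity start / end.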
| 68
|
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Calculates the Hubble parameter H(z) at a given redshift from the density parameters."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=6_8.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
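# The expression above is the Friedmann equation,
#   H(z) = H0 * sqrt(Omega_r*(1+z)^4 + Omega_m*(1+z)^3 + Omega_k*(1+z)^2 + Omega_L),
# so at redshift z = 0, with the densities summing to one, H(0) == H0
# (68.3 in the demo above).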
| 68
| 1
|
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Expects two lists of numbers and returns the Manhattan distance between them."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
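# Illustrative check (in addition to the doctests): both variants agree,
# since each just sums the absolute per-coordinate differences.
assert manhattan_distance([1, 1], [9, 9]) == manhattan_distance_one_liner([1, 1], [9, 9]) == 16.0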
| 67
|
def net_present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """Calculates the net present value of a list of cash flows, rounded to two decimals."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
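# Worked example (illustrative): an outlay of 1000 followed by inflows of
# 300, 400 and 500 discounted at 10% is slightly value-destroying:
#   -1000 + 300/1.1 + 400/1.21 + 500/1.331 ~= -21.04
assert net_present_value(0.10, [-1000.0, 300.0, 400.0, 500.0]) == -21.04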
| 144
| 0
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 531
|
def excel_title_to_column(column_title: str) -> int:
    """Converts an Excel-style column title (e.g. "A", "AB") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
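# The loop is base-26 positional notation with digits A=1 .. Z=26, e.g.:
assert excel_title_to_column("AB") == 1 * 26 + 2  # == 28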
| 531
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 95
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
| 201
| 0
|
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(
    lat1: float, lon1: float, lat2: float, lon2: float
) -> float:
    """
    Calculates the distance in meters between two points on the surface of an
    oblate-spheroid Earth using Lambert's ellipsoidal-distance formula.
    """
    # Equation parameter for the flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_demonimator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_demonimator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
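# Example call (illustrative coordinates, San Francisco -> New York City);
# the result is in meters and should land near the true distance of roughly
# 4,130 km:
#   lamberts_ellipsoidal_distance(37.774856, -122.424227, 40.713019, -74.012647)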
| 703
|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__lowerCamelCase : Dict = logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
__lowerCamelCase : Dict = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
__lowerCamelCase : Tuple = {
"""facebook/blenderbot_small-90M""": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" BlenderbotSmall tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 38
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """
    Returns the optimal value the maximiser (or minimiser) can obtain from the
    given game tree of leaf scores, with turns alternating at each depth.
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")

    if not scores:
        raise ValueError("Scores cannot be empty")

    if depth == height:
        return scores[node_index]

    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
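# Hand trace for the demo above (8 leaves, height = 3, maximiser at the root):
#   depth 2 (max of leaf pairs): [90, 33, 65, 34423]
#   depth 1 (min): [33, 65]
#   depth 0 (max): 65  <- the printed optimal value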
| 161
|
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 161
| 1
|
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]
    return images


@require_vision
@require_torch
class ImageGPTImageProcessorTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)

        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
| 721
|
"""simple docstring"""
def is_pentagonal(n: int) -> bool:
    """Returns True if n is a pentagonal number, False otherwise."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """
    Returns the minimal difference of two pentagonal numbers P_j and P_k whose
    sum and difference are both pentagonal.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
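# Quick sanity check of the predicate (illustrative): the pentagonal numbers
# start 1, 5, 12, 22, 35, ...
assert is_pentagonal(22) and not is_pentagonal(23)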
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20
| 0
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 604
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 604
| 1
|
from functools import reduce
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Returns the largest product of thirteen adjacent digits in N."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(f"{solution() = }")
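# How the reduce works (illustrative, on a 4-digit window): "9989" folds as
# "9" * "9" -> "81", then "81" * "8" -> "648", then "648" * "9" -> "5832";
# int(...) of the final string is the window's digit product.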
| 486
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory function used to convert a model TF 1.0 checkpoint into a PyTorch checkpoint."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to argparse so it's available for the transformers-cli."""
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"""--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
| 486
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 529
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict = logging.get_logger(__name__)
__lowerCAmelCase : int = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 529
| 1
|
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="attention" ):
__lowerCamelCase : Optional[int] = params[f'{prefix}/layers_{i}/{layer_name}/key/kernel']
__lowerCamelCase : Dict = params[f'{prefix}/layers_{i}/{layer_name}/out/kernel']
__lowerCamelCase : int = params[f'{prefix}/layers_{i}/{layer_name}/query/kernel']
__lowerCamelCase : int = params[f'{prefix}/layers_{i}/{layer_name}/value/kernel']
return k, o, q, v
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
if split_mlp_wi:
__lowerCamelCase : Optional[Any] = params[f'{prefix}/layers_{i}/mlp/wi_0/kernel']
__lowerCamelCase : str = params[f'{prefix}/layers_{i}/mlp/wi_1/kernel']
__lowerCamelCase : int = (wi_a, wi_a)
else:
__lowerCamelCase : Dict = params[f'{prefix}/layers_{i}/mlp/wi/kernel']
__lowerCamelCase : int = params[f'{prefix}/layers_{i}/mlp/wo/kernel']
return wi, wo
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return params[f'{prefix}/layers_{i}/{layer_name}/scale']
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , *, SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : str = traverse_util.flatten_dict(variables['target'] )
__lowerCamelCase : List[Any] = {'/'.join(SCREAMING_SNAKE_CASE__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__lowerCamelCase : str = 'encoder/layers_0/mlp/wi_0/kernel' in old
print('Split MLP:' , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : str = collections.OrderedDict()
# Shared embeddings.
__lowerCamelCase : List[Any] = old['token_embedder/embedding']
# Encoder.
for i in range(SCREAMING_SNAKE_CASE__ ):
# Block i, layer 0 (Self Attention).
__lowerCamelCase : Tuple = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'encoder' , 'pre_attention_layer_norm' )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'encoder' , 'attention' )
__lowerCamelCase : Any = layer_norm
__lowerCamelCase : Dict = k.T
__lowerCamelCase : Any = o.T
__lowerCamelCase : Dict = q.T
__lowerCamelCase : List[str] = v.T
# Block i, layer 1 (MLP).
__lowerCamelCase : Union[str, Any] = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'encoder' , 'pre_mlp_layer_norm' )
__lowerCamelCase , __lowerCamelCase : Dict = tax_mlp_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'encoder' , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : int = layer_norm
if split_mlp_wi:
__lowerCamelCase : Any = wi[0].T
__lowerCamelCase : Union[str, Any] = wi[1].T
else:
__lowerCamelCase : Optional[int] = wi.T
__lowerCamelCase : str = wo.T
__lowerCamelCase : Dict = old[
'encoder/relpos_bias/rel_embedding'
].T
__lowerCamelCase : Optional[int] = old['encoder/encoder_norm/scale']
if not is_encoder_only:
# Decoder.
for i in range(SCREAMING_SNAKE_CASE__ ):
# Block i, layer 0 (Self Attention).
__lowerCamelCase : Dict = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'decoder' , 'pre_self_attention_layer_norm' )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[int] = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'decoder' , 'self_attention' )
__lowerCamelCase : Optional[Any] = layer_norm
__lowerCamelCase : List[Any] = k.T
__lowerCamelCase : Any = o.T
__lowerCamelCase : str = q.T
__lowerCamelCase : int = v.T
# Block i, layer 1 (Cross Attention).
__lowerCamelCase : List[str] = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'decoder' , 'pre_cross_attention_layer_norm' )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[str] = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'decoder' , 'encoder_decoder_attention' )
__lowerCamelCase : Tuple = layer_norm
__lowerCamelCase : str = k.T
__lowerCamelCase : str = o.T
__lowerCamelCase : Dict = q.T
__lowerCamelCase : int = v.T
# Block i, layer 2 (MLP).
__lowerCamelCase : Tuple = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'decoder' , 'pre_mlp_layer_norm' )
__lowerCamelCase , __lowerCamelCase : Optional[Any] = tax_mlp_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'decoder' , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Any = layer_norm
if split_mlp_wi:
__lowerCamelCase : Dict = wi[0].T
__lowerCamelCase : Dict = wi[1].T
else:
__lowerCamelCase : Tuple = wi.T
__lowerCamelCase : int = wo.T
__lowerCamelCase : Dict = old['decoder/decoder_norm/scale']
__lowerCamelCase : List[str] = old[
'decoder/relpos_bias/rel_embedding'
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__lowerCamelCase : int = old['decoder/logits_dense/kernel'].T
return new
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : List[str] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__lowerCamelCase : str = state_dict['shared.weight']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__lowerCamelCase : Dict = state_dict['shared.weight']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('Using shared word embeddings as lm_head.' )
__lowerCamelCase : Dict = state_dict['shared.weight']
return state_dict
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : int = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Union[str, Any] = convert_tax_to_pytorch(SCREAMING_SNAKE_CASE__ , num_layers=config.num_layers , is_encoder_only=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Any = make_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ):
__lowerCamelCase : str = TaConfig.from_json_file(SCREAMING_SNAKE_CASE__ )
print(f'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__lowerCamelCase : Dict = TaEncoderModel(SCREAMING_SNAKE_CASE__ )
else:
__lowerCamelCase : Optional[int] = TaForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
    # Load weights from the T5X checkpoint
load_tax_weights_in_ta(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Verify that we can load the checkpoint.
model.from_pretrained(SCREAMING_SNAKE_CASE__ )
print('Done' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
        '--is_encoder_only', action='store_true', help='Whether the checkpoint is an encoder-only model.', default=False
)
lowercase_ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
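# Example invocation sketch (paths are hypothetical placeholders):
# python convert_t5x_checkpoint_to_pytorch.py \
#     --t5x_checkpoint_path /path/to/t5x/checkpoint \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/pytorch_model \
#     --is_encoder_only  # pass only for encoder-only checkpoints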
| 230
|
from math import pow, sqrt
def UpperCamelCase__ ( *SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : int = len(SCREAMING_SNAKE_CASE__ ) > 0 and all(value > 0.0 for value in values )
return result
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        else ValueError('Input Error: Molar mass values must be greater than 0.' )
)
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
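# Worked example (illustrative numbers): Graham's law states
# rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1), so for helium
# (~4.003 g/mol) vs. neon (~20.180 g/mol):
#   sqrt(20.180 / 4.003) ≈ 2.245, i.e. helium effuses about 2.25x faster.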
| 230
| 1
|
"""simple docstring"""
def __a ( a ):
"""simple docstring"""
if not isinstance(a, a ):
raise ValueError("check_bouncy() accepts only integer arguments" )
_a = str(a )
_a = "".join(sorted(a ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def __a ( a = 9_9 ):
"""simple docstring"""
if not 0 < percent < 1_0_0:
raise ValueError("solution() only accepts values from 0 to 100" )
_a = 0
_a = 1
while True:
if check_bouncy(a ):
bouncy_num += 1
if (bouncy_num / num) * 1_0_0 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'{solution(99)}')
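# Illustration (from the Project Euler 112 statement): 134468 has increasing
# digits, 66420 has decreasing digits, and 155349 is neither, so
# check_bouncy(155349) returns True; solution(99) finds the least number for
# which 99% of the integers up to it are bouncy.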
| 388
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
__SCREAMING_SNAKE_CASE = """src/transformers"""
__SCREAMING_SNAKE_CASE = """docs/source/en"""
__SCREAMING_SNAKE_CASE = """."""
def __a ( a, a, a ):
"""simple docstring"""
with open(a, "r", encoding="utf-8", newline="\n" ) as f:
_a = f.readlines()
# Find the start prompt.
_a = 0
while not lines[start_index].startswith(a ):
start_index += 1
start_index += 1
_a = start_index
while not lines[end_index].startswith(a ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
__SCREAMING_SNAKE_CASE = """Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
__SCREAMING_SNAKE_CASE = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
__SCREAMING_SNAKE_CASE = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
__SCREAMING_SNAKE_CASE = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
__SCREAMING_SNAKE_CASE = direct_transformers_import(TRANSFORMERS_PATH)
def __a ( a ):
"""simple docstring"""
_a = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", a )
return [m.group(0 ) for m in matches]
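# Example (sketch): camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"]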
def __a ( a, a ):
"""simple docstring"""
_a = 2 if text == "✅" or text == "❌" else len(a )
_a = (width - text_length) // 2
_a = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def __a ( ):
"""simple docstring"""
_a = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_a = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
_a = {name: config.replace("Config", "" ) for name, config in model_name_to_config.items()}
    # Dictionaries flagging whether each model prefix has a slow/fast tokenizer and a backend in PT/TF/Flax.
_a = collections.defaultdict(a )
_a = collections.defaultdict(a )
_a = collections.defaultdict(a )
_a = collections.defaultdict(a )
_a = collections.defaultdict(a )
    # Let's look through all transformers objects (once).
for attr_name in dir(a ):
_a = None
if attr_name.endswith("Tokenizer" ):
_a = slow_tokenizers
_a = attr_name[:-9]
elif attr_name.endswith("TokenizerFast" ):
_a = fast_tokenizers
_a = attr_name[:-1_3]
elif _re_tf_models.match(a ) is not None:
_a = tf_models
_a = _re_tf_models.match(a ).groups()[0]
elif _re_flax_models.match(a ) is not None:
_a = flax_models
_a = _re_flax_models.match(a ).groups()[0]
elif _re_pt_models.match(a ) is not None:
_a = pt_models
_a = _re_pt_models.match(a ).groups()[0]
if lookup_dict is not None:
while len(a ) > 0:
if attr_name in model_name_to_prefix.values():
_a = True
break
# Try again after removing the last word in the name
_a = "".join(camel_case_split(a )[:-1] )
# Let's build that table!
_a = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
_a = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
_a = [len(a ) + 2 for c in columns]
_a = max([len(a ) for name in model_names] ) + 2
# Build the table per se
_a = "|" + "|".join([_center_text(a, a ) for c, w in zip(a, a )] ) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
_a = {True: "✅", False: "❌"}
for name in model_names:
_a = model_name_to_prefix[name]
_a = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(a, a ) for l, w in zip(a, a )] ) + "|\n"
return table
def __a ( a=False ):
"""simple docstring"""
_a , _a , _a , _a = _find_text_in_file(
filename=os.path.join(a, "index.md" ), start_prompt="<!--This table is updated automatically from the auto modules", end_prompt="<!-- End table-->", )
_a = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(a, "index.md" ), "w", encoding="utf-8", newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
__SCREAMING_SNAKE_CASE = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 388
| 1
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class a_:
"""simple docstring"""
@staticmethod
def __UpperCamelCase ( *lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : Union[str, Any]) -> str:
"""simple docstring"""
pass
def A_ ( lowercase_ ) ->Dict:
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__UpperCAmelCase = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class a_( unittest.TestCase ):
"""simple docstring"""
__snake_case : Optional[int] =MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def __UpperCamelCase ( self : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int , lowerCAmelCase__ : Any) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = pipeline(
'document-question-answering' , model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__)
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(lowerCAmelCase__) , lowerCAmelCase__ , '')))
SCREAMING_SNAKE_CASE = 'What is the placebo?'
SCREAMING_SNAKE_CASE = [
{
'image': load_image(lowerCAmelCase__),
'question': question,
},
{
'image': image,
'question': question,
},
{
'image': image,
'question': question,
'word_boxes': word_boxes,
},
]
return dqa_pipeline, examples
def __UpperCamelCase ( self : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[Any]) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE = dqa_pipeline(lowerCAmelCase__ , top_k=2)
self.assertEqual(
lowerCAmelCase__ , [
[
{'score': ANY(lowerCAmelCase__), 'answer': ANY(lowerCAmelCase__), 'start': ANY(lowerCAmelCase__), 'end': ANY(lowerCAmelCase__)},
{'score': ANY(lowerCAmelCase__), 'answer': ANY(lowerCAmelCase__), 'start': ANY(lowerCAmelCase__), 'end': ANY(lowerCAmelCase__)},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def __UpperCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2')
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = 'How many cats are there?'
SCREAMING_SNAKE_CASE = [
{'score': 0.00_01, 'answer': 'oy 2312/2019', 'start': 3_8, 'end': 3_9},
{'score': 0.00_01, 'answer': 'oy 2312/2019 DUE', 'start': 3_8, 'end': 4_0},
]
SCREAMING_SNAKE_CASE = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2)
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4) , lowerCAmelCase__)
SCREAMING_SNAKE_CASE = dqa_pipeline({'image': image, 'question': question} , top_k=2)
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4) , lowerCAmelCase__)
        # No text is detected in this image, so layoutlmv2 should fail,
        # probably returning an empty answer.
SCREAMING_SNAKE_CASE = './tests/fixtures/tests_samples/COCO/000000039769.png'
SCREAMING_SNAKE_CASE = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2)
self.assertEqual(lowerCAmelCase__ , [])
        # We can optionally pass the words and bounding boxes directly
SCREAMING_SNAKE_CASE = './tests/fixtures/tests_samples/COCO/000000039769.png'
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , words=lowerCAmelCase__ , boxes=lowerCAmelCase__ , top_k=2)
self.assertEqual(lowerCAmelCase__ , [])
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __UpperCamelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = 'What is the invoice number?'
SCREAMING_SNAKE_CASE = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4) , [
{'score': 0.99_44, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.00_09, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline({'image': image, 'question': question} , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4) , [
{'score': 0.99_44, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.00_09, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4) , [
[
{'score': 0.99_44, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.00_09, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __UpperCamelCase ( self : List[str]) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=5_0 , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = 'What is the invoice number?'
SCREAMING_SNAKE_CASE = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4) , [
{'score': 0.99_74, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.99_48, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline({'image': image, 'question': question} , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4) , [
{'score': 0.99_74, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.99_48, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4) , [
[
{'score': 0.99_74, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.99_48, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __UpperCamelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=lowerCAmelCase__)
SCREAMING_SNAKE_CASE = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=lowerCAmelCase__ , revision='3dc6de3' , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = 'What is the invoice number?'
SCREAMING_SNAKE_CASE = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4) , [
{'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline({'image': image, 'question': question} , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4) , [
{'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4) , [
[
{'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
]
]
* 2 , )
SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(lowerCAmelCase__) , lowerCAmelCase__ , '')))
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4) , [
{'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __UpperCamelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=lowerCAmelCase__)
SCREAMING_SNAKE_CASE = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=lowerCAmelCase__ , revision='3dc6de3' , max_seq_len=5_0 , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = 'What is the invoice number?'
SCREAMING_SNAKE_CASE = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4) , [
{'score': 0.99_99, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.99_98, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4) , [
[
{'score': 0.99_99, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.99_98, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
]
]
* 2 , )
SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(lowerCAmelCase__) , lowerCAmelCase__ , '')))
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4) , [
{'score': 0.99_99, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.99_98, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
@slow
@require_torch
def __UpperCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = pipeline(
'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa') , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = 'What is the invoice number?'
SCREAMING_SNAKE_CASE = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2)
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4) , [{'answer': 'us-001'}])
@require_tf
@unittest.skip('Document question answering not implemented in TF')
def __UpperCamelCase ( self : Optional[int]) -> int:
"""simple docstring"""
pass
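# Minimal usage sketch outside the test harness (model name as exercised above):
# from transformers import pipeline
# dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
# dqa(image="invoice.png", question="What is the invoice number?", top_k=1)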
| 709
|
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def A_ ( lowercase_ , lowercase_ ) ->Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = args.log_outputs
SCREAMING_SNAKE_CASE = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
SCREAMING_SNAKE_CASE = load_metric('wer' )
SCREAMING_SNAKE_CASE = load_metric('cer' )
# compute metrics
SCREAMING_SNAKE_CASE = wer.compute(references=result['target'] , predictions=result['prediction'] )
SCREAMING_SNAKE_CASE = cer.compute(references=result['target'] , predictions=result['prediction'] )
# print & log results
SCREAMING_SNAKE_CASE = f'''WER: {wer_result}\nCER: {cer_result}'''
print(lowercase_ )
with open(f'''{dataset_id}_eval_results.txt''' , 'w' ) as f:
f.write(lowercase_ )
    # Log all results in text files; possibly interesting for analysis.
if log_outputs is not None:
SCREAMING_SNAKE_CASE = f'''log_{dataset_id}_predictions.txt'''
SCREAMING_SNAKE_CASE = f'''log_{dataset_id}_targets.txt'''
with open(lowercase_ , 'w' ) as p, open(lowercase_ , 'w' ) as t:
# mapping function to write output
def write_to_file(lowercase_ , lowercase_ ):
p.write(f'''{i}''' + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(f'''{i}''' + '\n' )
t.write(batch['target'] + '\n' )
result.map(lowercase_ , with_indices=lowercase_ )
def A_ ( lowercase_ ) ->str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
SCREAMING_SNAKE_CASE = re.sub(lowercase_ , '' , text.lower() )
    # In addition, we can normalize the target text, e.g. removing newline characters.
# note that order is important here!
SCREAMING_SNAKE_CASE = ['\n\n', '\n', ' ', ' ']
for t in token_sequences_to_ignore:
SCREAMING_SNAKE_CASE = ' '.join(text.split(lowercase_ ) )
return text
def A_ ( lowercase_ ) ->Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowercase_ )
    # for testing: only process the first few examples as a test
# dataset = dataset.select(range(10))
# load processor
SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(args.model_id )
SCREAMING_SNAKE_CASE = feature_extractor.sampling_rate
# resample audio
SCREAMING_SNAKE_CASE = dataset.cast_column('audio' , Audio(sampling_rate=lowercase_ ) )
# load eval pipeline
if args.device is None:
SCREAMING_SNAKE_CASE = 0 if torch.cuda.is_available() else -1
SCREAMING_SNAKE_CASE = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowercase_ ):
SCREAMING_SNAKE_CASE = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
SCREAMING_SNAKE_CASE = prediction['text']
SCREAMING_SNAKE_CASE = normalize_text(batch['sentence'] )
return batch
# run inference on all examples
SCREAMING_SNAKE_CASE = dataset.map(lowercase_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowercase_ , lowercase_ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
__UpperCAmelCase = parser.parse_args()
main(args)
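# Example invocation sketch (model and dataset identifiers are illustrative):
# python eval.py --model_id facebook/wav2vec2-base-960h \
#     --dataset mozilla-foundation/common_voice_7_0 --config en --split test --log_outputs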
| 259
| 0
|
"""simple docstring"""
from itertools import product
def lowerCAmelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : int ):
"""simple docstring"""
__lowercase = sides_number
__lowercase = max_face_number * dice_number
__lowercase = [0] * (max_total + 1)
__lowercase = 1
__lowercase = range(UpperCamelCase__ , max_face_number + 1 )
for dice_numbers in product(UpperCamelCase__ , repeat=UpperCamelCase__ ):
__lowercase = sum(UpperCamelCase__ )
totals_frequencies[total] += 1
return totals_frequencies
def lowerCAmelCase_ ( ):
"""simple docstring"""
__lowercase = total_frequency_distribution(
sides_number=4 , dice_number=9 )
__lowercase = total_frequency_distribution(
sides_number=6 , dice_number=6 )
__lowercase = 0
__lowercase = 9
__lowercase = 4 * 9
__lowercase = 6
for peter_total in range(UpperCamelCase__ , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
__lowercase = (4**9) * (6**6)
__lowercase = peter_wins_count / total_games_number
__lowercase = round(UpperCamelCase__ , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 616
|
"""simple docstring"""
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def lowerCAmelCase_ ( UpperCamelCase__ : np.ndarray , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : int ):
"""simple docstring"""
__lowercase = cva.getAffineTransform(UpperCamelCase__ , UpperCamelCase__ )
return cva.warpAffine(UpperCamelCase__ , UpperCamelCase__ , (rows, cols) )
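# Note (sketch): cva.getAffineTransform solves for the 2x3 matrix that maps the
# three source points onto the three destination points; warpAffine then applies
# that matrix to every pixel of the input image.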
if __name__ == "__main__":
# read original image
UpperCAmelCase__ =cva.imread(
str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
)
# turn image in gray scale value
UpperCAmelCase__ =cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
UpperCAmelCase__ , UpperCAmelCase__ =gray_img.shape
# set different points to rotate image
UpperCAmelCase__ =np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
UpperCAmelCase__ =np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
UpperCAmelCase__ =np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
UpperCAmelCase__ =np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
UpperCAmelCase__ =[
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
UpperCAmelCase__ =plt.figure(1)
UpperCAmelCase__ =["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
plt.title(titles[i])
plt.axis("off")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 616
| 1
|
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
__A = "Usage of script: script_name <size_of_canvas:int>"
__A = [0] * 100 + [1] * 10
random.shuffle(choice)
def lowerCAmelCase_ ( __a ) -> list[list[bool]]:
"""simple docstring"""
lowerCamelCase__: Any =[[False for i in range(__a )] for j in range(__a )]
return canvas
def lowerCAmelCase_ ( __a ) -> None:
"""simple docstring"""
for i, row in enumerate(__a ):
for j, _ in enumerate(__a ):
lowerCamelCase__: int =bool(random.getrandbits(1 ) )
def lowerCAmelCase_ ( __a ) -> list[list[bool]]:
"""simple docstring"""
lowerCamelCase__: str =np.array(__a )
lowerCamelCase__: Optional[Any] =np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(__a ):
for c, pt in enumerate(__a ):
lowerCamelCase__: Optional[int] =__judge_point(
__a , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
lowerCamelCase__: int =next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
lowerCamelCase__: list[list[bool]] =current_canvas.tolist()
return return_canvas
def lowerCAmelCase_ ( __a , __a ) -> bool:
"""simple docstring"""
lowerCamelCase__: Tuple =0
lowerCamelCase__: List[Any] =0
    # Count the dead and alive neighbours.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
    # Handle the duplicate count of the focus point itself.
if pt:
alive -= 1
else:
dead -= 1
    # Run the rules of the game here.
lowerCamelCase__: Optional[int] =pt
if pt:
if alive < 2:
lowerCamelCase__: Optional[int] =False
elif alive == 2 or alive == 3:
lowerCamelCase__: Optional[int] =True
elif alive > 3:
lowerCamelCase__: Dict =False
else:
if alive == 3:
lowerCamelCase__: List[Any] =True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
__A = int(sys.argv[1])
# main working structure of this module.
__A = create_canvas(canvas_size)
seed(c)
__A , __A = plt.subplots()
fig.show()
__A = ListedColormap(["w", "k"])
try:
while True:
__A = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
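# Rules recap (Conway's Game of Life), as implemented by the judge function above:
# a live cell with fewer than 2 live neighbours dies, with 2 or 3 survives, and
# with more than 3 dies; a dead cell with exactly 3 live neighbours becomes live.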
| 437
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = logging.get_logger(__name__)
__A = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "blip_2_vision_model"
def __init__(self : Union[str, Any] , UpperCAmelCase_ : int=1_408 , UpperCAmelCase_ : List[str]=6_144 , UpperCAmelCase_ : List[Any]=39 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : List[str]=224 , UpperCAmelCase_ : Any=14 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : str=0.0_0001 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : str=1E-1_0 , UpperCAmelCase_ : Any=True , **UpperCAmelCase_ : Optional[Any] , ) ->Optional[int]:
'''simple docstring'''
super().__init__(**UpperCAmelCase_)
lowerCamelCase__: Any =hidden_size
lowerCamelCase__: Any =intermediate_size
lowerCamelCase__: Union[str, Any] =num_hidden_layers
lowerCamelCase__: Optional[Any] =num_attention_heads
lowerCamelCase__: Dict =patch_size
lowerCamelCase__: List[Any] =image_size
lowerCamelCase__: Union[str, Any] =initializer_range
lowerCamelCase__: Optional[Any] =attention_dropout
lowerCamelCase__: Union[str, Any] =layer_norm_eps
lowerCamelCase__: Dict =hidden_act
lowerCamelCase__: Union[str, Any] =qkv_bias
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Optional[int] , UpperCAmelCase_ : Union[str, os.PathLike] , **UpperCAmelCase_ : List[Any]) ->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCAmelCase_)
lowerCamelCase__ , lowerCamelCase__: str =cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_)
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("model_type") == "blip-2":
lowerCamelCase__: Any =config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(UpperCAmelCase_ , **UpperCAmelCase_)
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "blip_2_qformer"
def __init__(self : str , UpperCAmelCase_ : Any=30_522 , UpperCAmelCase_ : Union[str, Any]=768 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Optional[int]=3_072 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Optional[int]=512 , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : Optional[Any]=1E-1_2 , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : Optional[int]="absolute" , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : int=1_408 , **UpperCAmelCase_ : Optional[int] , ) ->List[str]:
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Optional[int] =vocab_size
lowerCamelCase__: Dict =hidden_size
lowerCamelCase__: Tuple =num_hidden_layers
lowerCamelCase__: List[Any] =num_attention_heads
lowerCamelCase__: Optional[Any] =hidden_act
lowerCamelCase__: Optional[Any] =intermediate_size
lowerCamelCase__: Dict =hidden_dropout_prob
lowerCamelCase__: Any =attention_probs_dropout_prob
lowerCamelCase__: Union[str, Any] =max_position_embeddings
lowerCamelCase__: Optional[Any] =initializer_range
lowerCamelCase__: List[Any] =layer_norm_eps
lowerCamelCase__: Tuple =position_embedding_type
lowerCamelCase__: List[Any] =cross_attention_frequency
lowerCamelCase__: Tuple =encoder_hidden_size
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Union[str, Any] , UpperCAmelCase_ : Union[str, os.PathLike] , **UpperCAmelCase_ : List[Any]) ->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCAmelCase_)
lowerCamelCase__ , lowerCamelCase__: Tuple =cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_)
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("model_type") == "blip-2":
lowerCamelCase__: Any =config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(UpperCAmelCase_ , **UpperCAmelCase_)
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "blip-2"
lowercase_ = True
def __init__(self : Any , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : int=32 , **UpperCAmelCase_ : str) ->List[str]:
'''simple docstring'''
super().__init__(**UpperCAmelCase_)
if vision_config is None:
lowerCamelCase__: Optional[int] ={}
logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
if qformer_config is None:
lowerCamelCase__: str ={}
logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
if text_config is None:
lowerCamelCase__: Union[str, Any] ={}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
lowerCamelCase__: Optional[Any] =BlipaVisionConfig(**UpperCAmelCase_)
lowerCamelCase__: Optional[int] =BlipaQFormerConfig(**UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =text_config["model_type"] if "model_type" in text_config else "opt"
lowerCamelCase__: Dict =CONFIG_MAPPING[text_model_type](**UpperCAmelCase_)
lowerCamelCase__: Optional[int] =self.text_config.tie_word_embeddings
lowerCamelCase__: List[str] =self.text_config.is_encoder_decoder
lowerCamelCase__: Dict =num_query_tokens
lowerCamelCase__: Optional[Any] =self.vision_config.hidden_size
lowerCamelCase__: Tuple =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
lowerCamelCase__: List[Any] =1.0
lowerCamelCase__: Union[str, Any] =0.02
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Any , UpperCAmelCase_ : BlipaVisionConfig , UpperCAmelCase_ : BlipaQFormerConfig , UpperCAmelCase_ : PretrainedConfig , **UpperCAmelCase_ : int , ) ->Optional[int]:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->int:
'''simple docstring'''
lowerCamelCase__: List[Any] =copy.deepcopy(self.__dict__)
lowerCamelCase__: Any =self.vision_config.to_dict()
lowerCamelCase__: Any =self.qformer_config.to_dict()
lowerCamelCase__: Any =self.text_config.to_dict()
lowerCamelCase__: int =self.__class__.model_type
return output
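# Composition note (upstream these classes are Blip2VisionConfig,
# Blip2QFormerConfig, and Blip2Config): the composite config wires the vision
# tower's hidden size into the Q-Former's encoder_hidden_size and defaults the
# text backbone to OPT when no text_config is supplied.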
| 437
| 1
|
"""simple docstring"""
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
__lowerCamelCase = datasets.logging.get_logger(__name__)
__lowerCamelCase = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
__lowerCamelCase = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
__lowerCamelCase = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
__lowerCamelCase = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def lowerCamelCase__ ( self : Any ) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , )
def lowerCamelCase__ ( self : List[Any] , __snake_case : Optional[int] ) -> Tuple:
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
"""Using default BLEURT-Base checkpoint for sequence maximum length 128. """
"""You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" )
__magic_name__: List[Any] = """bleurt-base-128"""
if self.config_name.lower() in CHECKPOINT_URLS:
__magic_name__: str = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
__magic_name__: List[Any] = self.config_name.upper()
else:
raise KeyError(
F'{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}' )
# download the model checkpoint specified by self.config_name and set up the scorer
__magic_name__: Union[str, Any] = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
__magic_name__: Any = score.BleurtScorer(os.path.join(__snake_case , __snake_case ) )
def lowerCamelCase__ ( self : Dict , __snake_case : Any , __snake_case : List[Any] ) -> Optional[Any]:
__magic_name__: Union[str, Any] = self.scorer.score(references=__snake_case , candidates=__snake_case )
return {"scores": scores}
| 96
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__a = None
__a = logging.get_logger(__name__)
__a = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
__a = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
__a = {
'moussaKam/mbarthez': 1_0_2_4,
'moussaKam/barthez': 1_0_2_4,
'moussaKam/barthez-orangesum-title': 1_0_2_4,
}
__a = '▁'
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :int = VOCAB_FILES_NAMES
a :Any = PRETRAINED_VOCAB_FILES_MAP
a :Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a :Dict = ['input_ids', 'attention_mask']
a :Optional[Any] = BarthezTokenizer
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : Dict="<s>" , SCREAMING_SNAKE_CASE_ : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE_ : int="</s>" , SCREAMING_SNAKE_CASE_ : Optional[int]="<s>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]="<unk>" , SCREAMING_SNAKE_CASE_ : List[str]="<pad>" , SCREAMING_SNAKE_CASE_ : Any="<mask>" , **SCREAMING_SNAKE_CASE_ : Union[str, Any] , ) -> Any:
# Mask token behave like a normal word, i.e. include the space before it
lowercase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowercase_ = vocab_file
lowercase_ = False if not self.vocab_file else True
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase_ = [self.cls_token_id]
lowercase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
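# Special-token layout sketch (methods above; upstream names are
# build_inputs_with_special_tokens / create_token_type_ids_from_sequences):
# a single sequence becomes <s> A </s>, and a pair becomes <s> A </s></s> B </s>,
# matching the BART scheme.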
| 97
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class lowerCAmelCase :
def __init__( self , snake_case__ , ):
lowerCAmelCase : Optional[int] = parent
lowerCAmelCase : Tuple = 13
lowerCAmelCase : List[Any] = 7
lowerCAmelCase : str = True
lowerCAmelCase : Optional[Any] = True
lowerCAmelCase : Tuple = False
lowerCAmelCase : int = True
lowerCAmelCase : str = 99
lowerCAmelCase : str = 32
lowerCAmelCase : Tuple = 2
lowerCAmelCase : List[Any] = 4
lowerCAmelCase : Dict = 37
lowerCAmelCase : int = 'gelu'
lowerCAmelCase : Dict = 0.1
lowerCAmelCase : Dict = 0.1
lowerCAmelCase : str = 512
lowerCAmelCase : Optional[int] = 16
lowerCAmelCase : Tuple = 2
lowerCAmelCase : Tuple = 0.0_2
lowerCAmelCase : Tuple = 3
lowerCAmelCase : Optional[int] = 4
lowerCAmelCase : Union[str, Any] = None
def lowercase ( self ):
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Tuple = None
if self.use_input_mask:
lowerCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Union[str, Any] = None
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : List[Any] = None
if self.use_labels:
lowerCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : List[str] = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Any = TFDistilBertModel(config=snake_case__ )
lowerCAmelCase : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase : List[Any] = model(snake_case__ )
lowerCAmelCase : List[str] = [input_ids, input_mask]
lowerCAmelCase : Optional[int] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Tuple = TFDistilBertForMaskedLM(config=snake_case__ )
lowerCAmelCase : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase : int = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[Any] = TFDistilBertForQuestionAnswering(config=snake_case__ )
lowerCAmelCase : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
}
lowerCAmelCase : Optional[int] = model(snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = self.num_labels
lowerCAmelCase : List[str] = TFDistilBertForSequenceClassification(snake_case__ )
lowerCAmelCase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase : Dict = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
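# A quick interactive sketch of the same slice check (an illustration, not part of the test
# suite; assumes Hub access and the values pinned above):
#
#   model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
#   output = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]  # shape (1, 6, 768): 1 sequence, 6 tokens, hidden size 768
#   print(output[:, :3, :3])  # should match expected_slice to within atol=1e-4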
| 646
|
"""Convert CvT checkpoints from the original repository to the Hugging Face format."""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Return the (new_name, old_name) rename pairs for the patch embeddings of stage `idx`."""
    embed = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
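# For illustration, the first pair returned by embeddings(0) maps the Hugging Face name to the
# original name (derived directly from the f-strings above):
#   ("cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight",
#    "stage0.patch_embed.proj.weight")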
def attention(idx, cnt):
    """Return the (new_name, old_name) rename pairs for attention block `cnt` of stage `idx`."""
    attention_weights = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def cls_token(idx):
    """Return the rename pair for the classification token of stage `idx`."""
    token = []
token.append((F"cvt.encoder.stages.{idx}.cls_token", 'stage2.cls_token') )
return token
def final():
    """Return the rename pairs for the final layernorm and classification head."""
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """Fetch the ImageNet-1k label map, build the CvT config, and convert the original checkpoint."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1 + 2 + 10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1 + 4 + 16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
        help='Path to the original CvT checkpoint file (.pth).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
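# Example invocation (the script name and paths here are hypothetical; the checkpoint itself
# comes from the zoo link above):
#   python convert_cvt_checkpoint.py --cvt_model cvt-13 --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-13-384x384-IN-1k.pth --pytorch_dump_folder_path ./cvt-13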
| 646
| 1
|
"""simple docstring"""
def __snake_case ( ) -> Tuple:
"""simple docstring"""
for n in range(1 , 1000000 ):
yield n * (n + 1) // 2
def __snake_case ( UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
A = 1
A = 2
while i * i <= n:
A = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def __snake_case ( ) -> Any:
"""simple docstring"""
return next(i for i in triangle_number_generator() if count_divisors(UpperCamelCase__ ) > 500 )
if __name__ == "__main__":
print(solution())
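# Sanity check for count_divisors, using the standard fact that
# d(p1^a1 * p2^a2 * ...) = (a1 + 1) * (a2 + 1) * ...; this note is not part of the original solution:
#   28 = 2^2 * 7, so count_divisors(28) == (2 + 1) * (1 + 1) == 6  (divisors 1, 2, 4, 7, 14, 28)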
| 690
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : List[str] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : List[Any] = {
"vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
"tokenizer_file": {
"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
},
}
UpperCamelCase : Any = {"mobilebert-uncased": 512}
UpperCamelCase : Any = {}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = MobileBertTokenizer
def __init__( self : Optional[int] , _lowercase : Optional[int]=None , _lowercase : Any=None , _lowercase : Optional[int]=True , _lowercase : int="[UNK]" , _lowercase : Dict="[SEP]" , _lowercase : Any="[PAD]" , _lowercase : str="[CLS]" , _lowercase : Union[str, Any]="[MASK]" , _lowercase : List[Any]=True , _lowercase : Any=None , **_lowercase : Optional[Any] , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
A = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _lowercase ) != do_lower_case
or normalizer_state.get('strip_accents' , _lowercase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _lowercase ) != tokenize_chinese_chars
):
A = getattr(_lowercase , normalizer_state.pop('type' ) )
A = do_lower_case
A = strip_accents
A = tokenize_chinese_chars
A = normalizer_class(**_lowercase )
A = do_lower_case
def __a ( self : List[Any] , _lowercase : Tuple , _lowercase : Any=None ):
A = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self : Dict , _lowercase : str , _lowercase : Optional[str] = None ):
A = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
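# Illustration of the two helpers above (the token ids 5, 6, 7, 8 are made up for the example):
#   build_inputs_with_special_tokens([5, 6], [7, 8])
#       -> [cls_id, 5, 6, sep_id, 7, 8, sep_id]
#   create_token_type_ids_from_sequences([5, 6], [7, 8])
#       -> [0, 0, 0, 0, 1, 1, 1]   # zeros cover [CLS] A [SEP], ones cover B [SEP]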
| 690
| 1
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
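        # Worked example with the defaults above: frequency_out_dimension = (16 - 2) // 2 + 1 = 8,
        # time_out_dimension = (24 - 2) // 2 + 1 = 12, so num_patches = 8 * 12 = 96 and seq_length = 98.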
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range, frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )
    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as AST does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    """Download and load the sample audio clip used by the integration test."""
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )

    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
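# End-to-end sketch of the inference path exercised above (illustrative only; assumes Hub
# access and torchaudio):
#   audio, sr = prepare_audio()
#   inputs = feature_extractor(audio.squeeze().numpy(), sampling_rate=sr, return_tensors="pt")
#   logits = model(**inputs).logits      # shape (1, 527): one logit per AudioSet class
#   predicted_class = logits.argmax(-1).item()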
| 522
|
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    r"""
    Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer into a single processor.
    """

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, *args, **kwargs):
        """Process audio/text inputs and, optionally, audio/text targets."""
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def pad(self, *args, **kwargs):
        """Pad already-processed inputs and/or labels to a uniform length."""
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily treat the feature size as the number of mel bins so the
                # spectrogram targets are padded correctly, then restore the original value.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
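# Usage sketch for the processor above (the `processor` and `raw_speech` names are assumed for
# the example; a text-to-speech style call pairing text inputs with audio targets):
#   batch = processor(text="hello world", audio_target=raw_speech, sampling_rate=16000)
#   batch["input_ids"]                    # tokenized text inputs
#   batch["labels"]                       # target spectrogram features
#   batch.get("decoder_attention_mask")   # set when the target features include an attention mask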
| 522
| 1
|