def circle_sort(collection: list) -> list:
    """Sort a mutable collection in place using circle sort and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """Compare and swap mirrored pairs in collection[low:high + 1]; return True if any swap happened."""
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
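
# Illustrative doctest-style sanity check (not part of the original file; assumes
# the circle_sort defined above):
#     >>> circle_sort([5, 3, 1, 4, 2])
#     [1, 2, 3, 4, 5]
#     >>> circle_sort([])
#     []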
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
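
# Illustrative behavior of the metric helpers above (doctest-style; values checked
# by hand against the functions defined in this file):
#     >>> normalize_answer("The Quick, Brown Fox!")
#     'quick brown fox'
#     >>> round(f1_score("the quick brown fox", "a quick fox"), 2)
#     0.8
#     >>> calculate_exact_match(["Paris"], ["paris."])
#     {'em': 1.0}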
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string into individual characters."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
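
# Minimal usage sketch (assumes a local vocab.json mapping single characters to ids):
#     tokenizer = MgpstrTokenizer(vocab_file="vocab.json")
#     tokenizer.tokenize("abc")  # -> ['a', 'b', 'c'], since tokenization is character-level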
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos for equality, ignoring their names."""
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    # recurse into the subgraphs of control-flow nodes
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Remove duplicate initializers from an ONNX model and save an optimized copy."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                # element sizes: 1 = FLOAT, 6 = INT32 (4 bytes); 7 = INT64, 11 = DOUBLE (8 bytes)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
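
# Illustrative call (the path is hypothetical): loads model.onnx, drops duplicated
# initializers, writes optimized_model.onnx next to it, and returns the new path.
#     optimized_path = remove_dup_initializers("/tmp/model.onnx")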
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original DiT weights into our BEiT structure."""
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
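
# Example invocation (illustrative; the script name and output path are placeholders,
# the checkpoint URL is the default from the argument parser above):
#     python convert_dit_to_pytorch.py \
#         --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#         --pytorch_dump_folder_path ./dit-base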
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
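
# Minimal sketch of the derived property above: with the default conv_stride
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), the feature extractor downsamples the
# input waveform by 5 * 2**6 = 320.
#     config = SEWDConfig()
#     config.inputs_to_logits_ratio  # -> 320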
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
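
# Minimal sketch of the dynamic ONNX axes exposed above (default task):
#     onnx_config = RobertaOnnxConfig(RobertaConfig())
#     dict(onnx_config.inputs)
#     # -> {'input_ids': {0: 'batch', 1: 'sequence'}, 'attention_mask': {0: 'batch', 1: 'sequence'}}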
import argparse
import json

import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download

from transformers import (
    VideoMAEConfig,
    VideoMAEForPreTraining,
    VideoMAEForVideoClassification,
    VideoMAEImageProcessor,
)


def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config


def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')


def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


# We will verify our results on a video of eating spaghetti
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UpperCAmelCase_ ( _lowercase):
snake_case__ = 42
snake_case__ = None
def lowercase ( a__ : Union[str, Any] , a__ : int=0.999 , a__ : Tuple="cosine" , ) -> str:
if alpha_transform_type == "cosine":
def alpha_bar_fn(a__ : str ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(a__ : Any ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
_UpperCamelCase = []
for i in range(a__ ):
_UpperCamelCase = i / num_diffusion_timesteps
_UpperCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(a__ ) / alpha_bar_fn(a__ ) , a__ ) )
return torch.tensor(a__ , dtype=torch.floataa )
class UpperCAmelCase_ ( _lowercase , _lowercase):
@register_to_config
def __init__( self : Tuple , __UpperCamelCase : int = 1000 , __UpperCamelCase : str = "fixed_small_log" , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[float] = 1.0 , __UpperCamelCase : str = "epsilon" , __UpperCamelCase : str = "squaredcos_cap_v2" , ) -> List[str]:
if beta_schedule != "squaredcos_cap_v2":
raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' )
_UpperCamelCase = betas_for_alpha_bar(__UpperCamelCase )
_UpperCamelCase = 1.0 - self.betas
_UpperCamelCase = torch.cumprod(self.alphas , dim=0 )
_UpperCamelCase = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
_UpperCamelCase = 1.0
# setable values
_UpperCamelCase = None
_UpperCamelCase = torch.from_numpy(np.arange(0 , __UpperCamelCase )[::-1].copy() )
_UpperCamelCase = variance_type
def _UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : Optional[int] = None ) -> torch.FloatTensor:
return sample
def _UpperCamelCase ( self : List[str] , __UpperCamelCase : int , __UpperCamelCase : Union[str, torch.device] = None ) -> Optional[Any]:
_UpperCamelCase = num_inference_steps
_UpperCamelCase = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
_UpperCamelCase = (np.arange(0 , __UpperCamelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
_UpperCamelCase = torch.from_numpy(__UpperCamelCase ).to(__UpperCamelCase )
def _UpperCamelCase ( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Any=None , __UpperCamelCase : Optional[int]=None ) -> List[Any]:
if prev_timestep is None:
_UpperCamelCase = t - 1
_UpperCamelCase = self.alphas_cumprod[t]
_UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_UpperCamelCase = 1 - alpha_prod_t
_UpperCamelCase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_UpperCamelCase = self.betas[t]
else:
_UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_UpperCamelCase = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
_UpperCamelCase = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
_UpperCamelCase = torch.log(torch.clamp(__UpperCamelCase , min=1E-20 ) )
_UpperCamelCase = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
_UpperCamelCase = variance.log()
_UpperCamelCase = beta.log()
_UpperCamelCase = (predicted_variance + 1) / 2
_UpperCamelCase = frac * max_log + (1 - frac) * min_log
return variance
def _UpperCamelCase ( self : List[Any] , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : int , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : bool = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
_UpperCamelCase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
_UpperCamelCase , _UpperCamelCase = torch.split(__UpperCamelCase , sample.shape[1] , dim=1 )
else:
_UpperCamelCase = None
# 1. compute alphas, betas
if prev_timestep is None:
_UpperCamelCase = t - 1
_UpperCamelCase = self.alphas_cumprod[t]
_UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_UpperCamelCase = 1 - alpha_prod_t
_UpperCamelCase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_UpperCamelCase = self.betas[t]
_UpperCamelCase = self.alphas[t]
else:
_UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev
_UpperCamelCase = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_UpperCamelCase = model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
''' for the UnCLIPScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_UpperCamelCase = torch.clamp(
__UpperCamelCase , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_UpperCamelCase = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
_UpperCamelCase = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_UpperCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_UpperCamelCase = 0
if t > 0:
_UpperCamelCase = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=__UpperCamelCase , device=model_output.device )
_UpperCamelCase = self._get_variance(
__UpperCamelCase , predicted_variance=__UpperCamelCase , prev_timestep=__UpperCamelCase , )
if self.variance_type == "fixed_small_log":
_UpperCamelCase = variance
elif self.variance_type == "learned_range":
_UpperCamelCase = (0.5 * variance).exp()
else:
raise ValueError(
F'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
''' for the UnCLIPScheduler.''' )
_UpperCamelCase = variance * variance_noise
_UpperCamelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=__UpperCamelCase , pred_original_sample=__UpperCamelCase )
def _UpperCamelCase ( self : List[str] , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : torch.IntTensor , ) -> torch.FloatTensor:
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
_UpperCamelCase = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
_UpperCamelCase = timesteps.to(original_samples.device )
_UpperCamelCase = alphas_cumprod[timesteps] ** 0.5
_UpperCamelCase = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
_UpperCamelCase = sqrt_alpha_prod.unsqueeze(-1 )
_UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5
_UpperCamelCase = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
_UpperCamelCase = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
_UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
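
# Illustrative use of the cosine schedule helper above: it returns one beta per
# diffusion step, each in (0, max_beta].
#     betas = betas_for_alpha_bar(1000)
#     betas.shape  # -> torch.Size([1000])
#     bool((betas > 0).all() and (betas <= 0.999).all())  # -> True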
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
UpperCAmelCase = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def lowercase ( ) -> int:
_UpperCamelCase = Github(os.environ['''GITHUB_TOKEN'''] )
_UpperCamelCase = g.get_repo('''huggingface/transformers''' )
_UpperCamelCase = repo.get_issues(state='''open''' )
for issue in open_issues:
_UpperCamelCase = sorted([comment for comment in issue.get_comments()] , key=lambda a__ : i.created_at , reverse=a__ )
_UpperCamelCase = comments[0] if len(a__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
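
# Illustrative invocation (requires a GITHUB_TOKEN with access to the target repo;
# the script filename is a placeholder):
#     GITHUB_TOKEN=<token> python stale.py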
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all prime numbers below max_number, via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count the composite integers below max_number that have exactly two prime factors."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """k is the Harris sensitivity factor, empirically 0.04 or 0.06."""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                # Harris corner response: R = det(M) - k * trace(M)^2
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - self.k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
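
# Illustrative usage on a synthetic image (file names and the checkerboard
# construction below are examples, not part of the original script):
#
#   board = (np.indices((256, 256)).sum(axis=0) // 32 % 2) * 255
#   cv2.imwrite("checkerboard.png", board.astype(np.uint8))
#   detector = HarrisCorner(0.04, 3)
#   annotated, corners = detector.detect("checkerboard.png")
#   cv2.imwrite("checkerboard_corners.png", annotated)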
| 717
|
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
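
# CustomConfig and CustomImageProcessor are small dummy classes that live under
# the repo's utils/test_module directory (added to sys.path above); they exist
# purely so the register() tests below have something unknown to register.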
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 342
| 0
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[str] = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
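
# The mapping above is keyed by the --lr_scheduler CLI value; e.g. passing
# "cosine_w_restarts" selects get_cosine_with_hard_restarts_schedule_with_warmup.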
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys=None):
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 0
|
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments(parser):
    """Add quantization-specific arguments to the main argument parser."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )
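
# An illustrative flag combination (the driver script name is hypothetical):
#   python run_quant_qa.py --aprec 8 --wprec 8 --calibrator percentile \
#       --percentile 99.99 --fuse-qkv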
def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)


def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)


def enable_calibration(model):
    """Enable calibration of all *_input_quantizer modules in model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Disable calibration and load amax for all *_input_quantizer modules in model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)


def fuse_qkv(model, args):
    """Adjust quantization ranges to match an implementation where the QKV projections are implemented with a single GEMM."""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")


def recalibrate_weights(model):
    """Recalibrate weight quantizers: set amax to the max of the current weights."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax


def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print a per-layer summary of input and weight quantizer configurations."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        line = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(line) <= line_width:
            logger.info(line)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")


def print_quant_summary(model):
    """Print the count of TensorQuantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    """Set an attribute on mod's named quantizer submodule."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod (input, weight, or both quantizers)."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for every layer whose name matches a regex in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
| 614
| 0
|
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml

logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
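
# The decorator factory above picks the execution mode: with do_eager_mode=True
# (and use_xla=False) the wrapped call runs eagerly, otherwise it is wrapped in
# a tf.function that can be XLA-compiled. A sketch of typical use:
#
#   @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#   def forward():
#       return model(input_ids)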
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train

    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 47
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
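
# The tests below round-trip checkpoints across frameworks: the TF auto classes
# load PyTorch weights via from_pt=True, and the PyTorch auto classes load the
# converted weights back via from_tf=True.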
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
| 47
| 1
|
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}

        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}

        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
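
# Note on the tester above: two independent input id tensors are prepared so a
# first forward pass can produce "mems" that are fed back in with fresh inputs,
# exercising Transfo-XL's segment-level recurrence.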
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass


@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids , max_length=200 , do_sample=False )
        self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
A = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
A = parser.parse_args()
if args.model_type == "bert":
A = BertForMaskedLM.from_pretrained(args.model_name)
A = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
A = model.state_dict()
A = {}
for w in ["word_embeddings", "position_embeddings"]:
A = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
A = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
A = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
A = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
A = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
A = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
A = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
A = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
A = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
A = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
A = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
A = state_dict['cls.predictions.decoder.weight']
A = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
A = state_dict[f"""cls.predictions.transform.dense.{w}"""]
A = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint (checkpoint_path :str , metadata_path :str , entity_vocab_path :str , pytorch_dump_folder_path :str , model_size :str ) -> None:
    # Load configuration defined in the metadata file
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata['''model_config'''] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )
    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path )
    tokenizer = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_one = AddedToken('''<ent>''' , lstrip=False , rstrip=False )
    entity_token_two = AddedToken('''<ent2>''' , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_one, entity_token_two]} )
    config.vocab_size += 2
    print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    with open(os.path.join(pytorch_dump_folder_path , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
        json.dump(entity_vocab , f )
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    # Initialize the embeddings of the special tokens
    word_emb = state_dict['''embeddings.word_embeddings.weight''']
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
    enta_emb = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
    state_dict['''embeddings.word_embeddings.weight'''] = torch.cat([word_emb, ent_emb, enta_emb] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"""encoder.layer.{layer_index}.attention.self."""
            state_dict[prefix + '''w2e_''' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + '''e2w_''' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + '''e2e_''' + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['''entity_embeddings.entity_embeddings.weight''']
    entity_emb[entity_vocab['''[MASK2]''']] = entity_emb[entity_vocab['''[MASK]''']]
    model = LukeModel(config=config ).eval()
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"""Missing keys {', '.join(missing_keys )}. Expected only missing embeddings.position_ids""" )
    if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
        raise ValueError(
            '''Unexpected keys'''
            f""" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}""" )
    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path , task='''entity_classification''' )
    text = (
        '''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
        ''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
    )
    span = (39, 42)
    encoding = tokenizer(text , entity_spans=[span] , add_prefix_space=True , return_tensors='''pt''' )
    outputs = model(**encoding )
    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024) )
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
    else: # base
        expected_shape = torch.Size((1, 42, 768) )
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024) )
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]] )
    else: # base
        expected_shape = torch.Size((1, 1, 768) )
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
            f""" {expected_shape}""" )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print('''Saving PyTorch model to {}'''.format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def load_entity_vocab (entity_vocab_path :str ) -> Dict:
    entity_vocab = {}
    with open(entity_vocab_path , '''r''' , encoding='''utf-8''' ) as f:
        for index, line in enumerate(f ):
            title , _ = line.rstrip().split('''\t''' )
            entity_vocab[title] = index
    return entity_vocab
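# A sketch of the entity_vocab.tsv layout the loader above assumes (mirroring the
# upstream LUKE conversion script): one tab-separated "<entity title>\t<count>" pair per
# line; the line index becomes the entity id and the count column is discarded, e.g.:
#
#     [PAD]\t0
#     [MASK]\t0
#     Ana Ivanovic\t17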
if __name__ == "__main__":
_UpperCamelCase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
_UpperCamelCase : List[str] =parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_UpperCamelCase : List[Any] =logging.get_logger(__name__)
_UpperCamelCase : Optional[int] ={
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class UpperCAmelCase__ ( __snake_case ):
__snake_case : str = "instructblip_vision_model"
def __init__( self ,A__=1408 ,A__=6144 ,A__=39 ,A__=16 ,A__=224 ,A__=14 ,A__="gelu" ,A__=1E-6 ,A__=0.0 ,A__=1E-10 ,A__=True ,**A__ ,):
super().__init__(**A__ )
_A : str = hidden_size
_A : Any = intermediate_size
_A : List[Any] = num_hidden_layers
_A : Union[str, Any] = num_attention_heads
_A : Dict = patch_size
_A : List[Any] = image_size
_A : Any = initializer_range
_A : Tuple = attention_dropout
_A : Optional[Any] = layer_norm_eps
_A : Optional[Any] = hidden_act
_A : str = qkv_bias
@classmethod
def A__ ( cls ,A__ ,**A__ ):
cls._set_token_in_kwargs(A__ )
_A , _A : Union[str, Any] = cls.get_config_dict(A__ ,**A__ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
_A : str = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(A__ ,**A__ )
class UpperCAmelCase__ ( __snake_case ):
__snake_case : Dict = "instructblip_qformer"
def __init__( self ,A__=30522 ,A__=768 ,A__=12 ,A__=12 ,A__=3072 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=512 ,A__=0.02 ,A__=1E-12 ,A__=0 ,A__="absolute" ,A__=2 ,A__=1408 ,**A__ ,):
super().__init__(pad_token_id=A__ ,**A__ )
_A : Optional[int] = vocab_size
_A : Tuple = hidden_size
_A : Any = num_hidden_layers
_A : Optional[int] = num_attention_heads
_A : str = hidden_act
_A : Union[str, Any] = intermediate_size
_A : Optional[int] = hidden_dropout_prob
_A : List[Any] = attention_probs_dropout_prob
_A : List[str] = max_position_embeddings
_A : List[Any] = initializer_range
_A : int = layer_norm_eps
_A : List[Any] = position_embedding_type
_A : Tuple = cross_attention_frequency
_A : Tuple = encoder_hidden_size
@classmethod
def A__ ( cls ,A__ ,**A__ ):
cls._set_token_in_kwargs(A__ )
_A , _A : Optional[int] = cls.get_config_dict(A__ ,**A__ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
_A : List[Any] = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(A__ ,**A__ )
class UpperCAmelCase__ ( __snake_case ):
__snake_case : Any = "instructblip"
__snake_case : Optional[int] = True
def __init__( self ,A__=None ,A__=None ,A__=None ,A__=32 ,**A__ ):
super().__init__(**A__ )
if vision_config is None:
_A : Optional[int] = {}
logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''' )
if qformer_config is None:
_A : str = {}
logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''' )
if text_config is None:
_A : Optional[Any] = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
_A : List[str] = InstructBlipVisionConfig(**A__ )
_A : Union[str, Any] = InstructBlipQFormerConfig(**A__ )
_A : Optional[Any] = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
_A : Union[str, Any] = CONFIG_MAPPING[text_model_type](**A__ )
_A : List[str] = self.text_config.tie_word_embeddings
_A : Dict = self.text_config.is_encoder_decoder
_A : str = num_query_tokens
_A : Union[str, Any] = self.vision_config.hidden_size
_A : List[str] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_A : Any = 1.0
_A : List[str] = 0.02
@classmethod
def A__ ( cls ,A__ ,A__ ,A__ ,**A__ ,):
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**A__ ,)
def A__ ( self ):
_A : Optional[int] = copy.deepcopy(self.__dict__ )
_A : Optional[int] = self.vision_config.to_dict()
_A : Optional[int] = self.qformer_config.to_dict()
_A : str = self.text_config.to_dict()
_A : List[Any] = self.__class__.model_type
return output
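# A minimal usage sketch, assuming the un-obfuscated upstream API that this file
# mirrors (transformers' InstructBlipConfig; the names below come from that library,
# not from the placeholder names above):
#
#     config = InstructBlipConfig(num_query_tokens=32)   # sub-configs default-initialized
#     config.to_dict()["qformer_config"]["model_type"]   # -> "instructblip_qformer"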
from typing import Any
import numpy as np
def is_hermitian ( matrix: np.ndarray ) -> bool:
    return np.array_equal(matrix , matrix.conjugate().T )
def rayleigh_quotient ( a: np.ndarray , v: np.ndarray ) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
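# Background: for a Hermitian matrix M and a nonzero complex vector v, the function
# above computes the Rayleigh quotient
#     R(M, v) = (v* . M . v) / (v* . v),
# where v* is the conjugate transpose of v; the result is always real and lies between
# the smallest and largest eigenvalues of M.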
def tests ( ) -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), f"""{a} is not hermitian."""
    print(rayleigh_quotient(a , v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), f"""{a} is not hermitian."""
    assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    tests()
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCAmelCase = get_tests_dir('''fixtures''')
class lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : str ):
"""simple docstring"""
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request' , return_value=response_mock ) as mock_head:
            _ = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
            # This check ensures we did call the fake head request
            mock_head.assert_called()
def snake_case ( self : Dict ):
"""simple docstring"""
__lowercase =WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class lowerCAmelCase ( unittest.TestCase ):
@classmethod
def snake_case ( cls : str ):
"""simple docstring"""
__lowercase =TOKEN
HfFolder.save_token(__lowercase )
@classmethod
def snake_case ( cls : str ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def snake_case ( self : int ):
"""simple docstring"""
__lowercase =WavaVecaFeatureExtractor.from_pretrained(__lowercase )
feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token )
__lowercase =WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowercase , getattr(__lowercase , __lowercase ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__lowercase , repo_id='test-feature-extractor' , push_to_hub=__lowercase , use_auth_token=self._token )
__lowercase =WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowercase , getattr(__lowercase , __lowercase ) )
def snake_case ( self : List[Any] ):
"""simple docstring"""
__lowercase =WavaVecaFeatureExtractor.from_pretrained(__lowercase )
feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token )
__lowercase =WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowercase , getattr(__lowercase , __lowercase ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__lowercase , repo_id='valid_org/test-feature-extractor-org' , push_to_hub=__lowercase , use_auth_token=self._token )
__lowercase =WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowercase , getattr(__lowercase , __lowercase ) )
def snake_case ( self : Optional[Any] ):
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
__lowercase =CustomFeatureExtractor.from_pretrained(__lowercase )
feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , )
__lowercase =AutoFeatureExtractor.from_pretrained(
f'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=__lowercase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor' )
from math import ceil
def assert_device_map ( device_map , num_blocks ):
    '''simple docstring'''
    blocks = list(range(0 , num_blocks ) )
    device_map_blocks = [item for sublist in list(device_map.values() ) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i ) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i )
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks ) != 0:
        raise ValueError(
            '''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'''
            ''' These attention blocks were specified more than once: ''' + str(duplicate_blocks ) )
    if len(missing_blocks ) != 0:
        raise ValueError(
            '''There are attention blocks for this model that are not specified in the device_map. Add these attention '''
            '''blocks to a device on the device_map: ''' + str(missing_blocks ) )
    if len(extra_blocks ) != 0:
        raise ValueError(
            '''The device_map contains more attention blocks than this model has. Remove these from the device_map:'''
            + str(extra_blocks ) )
def get_device_map ( n_layers , devices ):
    '''simple docstring'''
    layers = list(range(n_layers ) )
    n_blocks = int(ceil(n_layers / len(devices ) ) )
    layers_list = [layers[i : i + n_blocks] for i in range(0 , n_layers , n_blocks )]
    return dict(zip(devices , layers_list ) )
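# A minimal usage sketch for the helpers above, with hypothetical device ids:
#
#     device_map = get_device_map(12, [0, 1, 2, 3])
#     # -> {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}
#     assert_device_map(device_map, 12)  # passes: no duplicate, missing, or extra blocks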
class SubArray :
    """simple docstring"""
    def __init__( self : Optional[int], arr : str ):
        '''simple docstring'''
        # the input arrives as a comma-separated string; convert it to a list of items
        self.array = arr.split(''',''' )
    def solve_sub_array ( self : Optional[int] ):
        '''simple docstring'''
        sum_value = [int(self.array[0] )] * len(self.array )
        rear = [int(self.array[0] )] * len(self.array )
        for i in range(1, len(self.array ) ):
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) )
            rear[i] = max(sum_value[i], rear[i - 1] )
        return rear[len(self.array ) - 1]
if __name__ == "__main__":
    whole_array = input('please input some numbers:')
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(('the result is:', re))
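# The DP above is Kadane's algorithm: sum_value[i] holds the best subarray sum ending
# at index i, and rear[i] the running maximum over all prefixes. A minimal usage sketch:
#
#     SubArray("1,2,9,4,5,0,4,11,6").solve_sub_array()  # -> 42 (all terms are non-negative)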
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : str = logging.get_logger(__name__)
lowercase : Union[str, Any] = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class UpperCAmelCase_ ( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=5_0257 , n_positions=1024 , n_embd=768 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=5_0256 , eos_token_id=5_0256 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
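# A minimal usage sketch; the assertions rely on the attribute_map aliases defined above:
#
#     cfg = UpperCAmelCase_(n_layer=2, n_head=4, n_embd=64)
#     assert cfg.hidden_size == 64         # aliased to n_embd
#     assert cfg.num_hidden_layers == 2    # aliased to n_layer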
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
SCREAMING_SNAKE_CASE__ : Tuple =sys.version_info >= (3, 10)
def list_field ( default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
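# `list_field` gives dataclass fields a *mutable* list default safely via
# default_factory (e.g. `list_field(default=[1, 2, 3])` below); a plain `= [1, 2, 3]`
# default would make `dataclasses` raise a ValueError for the mutable default.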
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = 42
__snake_case = 42
__snake_case = 42
__snake_case = 42
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = 42
__snake_case = field(default="""toto""" , metadata={"""help""": """help message"""} )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = False
__snake_case = True
__snake_case = None
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
__snake_case = """titi"""
__snake_case = """toto"""
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
__snake_case = """titi"""
__snake_case = """toto"""
__snake_case = 42
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = "toto"
def a__ ( self ) -> Optional[Any]:
_lowerCamelCase : List[Any] = BasicEnum(self.foo )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = "toto"
def a__ ( self ) -> Dict:
_lowerCamelCase : Union[str, Any] = MixedTypeEnum(self.foo )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = None
__snake_case = field(default=a_ , metadata={"""help""": """help message"""} )
__snake_case = None
__snake_case = list_field(default=[] )
__snake_case = list_field(default=[] )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = list_field(default=[] )
__snake_case = list_field(default=[1, 2, 3] )
__snake_case = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
__snake_case = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = field()
__snake_case = field()
__snake_case = field()
def a__ ( self ) -> Dict:
_lowerCamelCase : str = BasicEnum(self.required_enum )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = 42
__snake_case = field()
__snake_case = None
__snake_case = field(default="""toto""" , metadata={"""help""": """help message"""} )
__snake_case = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
if is_python_no_less_than_3_10:
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = False
__snake_case = True
__snake_case = None
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = None
__snake_case = field(default=a_ , metadata={"""help""": """help message"""} )
__snake_case = None
__snake_case = list_field(default=[] )
__snake_case = list_field(default=[] )
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def a__ ( self , _lowercase , _lowercase ) -> Optional[Any]:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
_lowerCamelCase : List[str] = {k: v for k, v in vars(_lowercase ).items() if k != '''container'''}
_lowerCamelCase : str = {k: v for k, v in vars(_lowercase ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , _lowercase ) and yy.get('''choices''' , _lowercase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](_lowercase ) , yy['''type'''](_lowercase ) )
del xx["type"], yy["type"]
self.assertEqual(_lowercase , _lowercase )
def a__ ( self ) -> Optional[Any]:
_lowerCamelCase : str = HfArgumentParser(_lowercase )
_lowerCamelCase : Dict = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowercase , required=_lowercase )
expected.add_argument('''--bar''' , type=_lowercase , required=_lowercase )
expected.add_argument('''--baz''' , type=_lowercase , required=_lowercase )
expected.add_argument('''--flag''' , type=_lowercase , default=_lowercase , const=_lowercase , nargs='''?''' )
self.argparsersEqual(_lowercase , _lowercase )
_lowerCamelCase : str = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((_lowerCamelCase), ) : Union[str, Any] = parser.parse_args_into_dataclasses(_lowercase , look_for_args_file=_lowercase )
self.assertFalse(example.flag )
def a__ ( self ) -> Optional[int]:
_lowerCamelCase : Dict = HfArgumentParser(_lowercase )
_lowerCamelCase : List[str] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=_lowercase )
expected.add_argument('''--baz''' , default='''toto''' , type=_lowercase , help='''help message''' )
self.argparsersEqual(_lowercase , _lowercase )
def a__ ( self ) -> Union[str, Any]:
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowercase , default=_lowercase , const=_lowercase , nargs='''?''' )
expected.add_argument('''--baz''' , type=_lowercase , default=_lowercase , const=_lowercase , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=_lowercase , dest='''baz''' )
expected.add_argument('''--opt''' , type=_lowercase , default=_lowercase )
_lowerCamelCase : Optional[Any] = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowercase )
for dataclass_type in dataclass_types:
_lowerCamelCase : Optional[Any] = HfArgumentParser(_lowercase )
self.argparsersEqual(_lowercase , _lowercase )
_lowerCamelCase : List[Any] = parser.parse_args([] )
self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase ) )
_lowerCamelCase : Dict = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase ) )
_lowerCamelCase : int = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase ) )
_lowerCamelCase : Any = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase ) )
_lowerCamelCase : List[Any] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase ) )
def a__ ( self ) -> Union[str, Any]:
_lowerCamelCase : Union[str, Any] = HfArgumentParser(_lowercase )
_lowerCamelCase : Optional[int] = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(_lowercase , _lowercase )
_lowerCamelCase : Dict = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
_lowerCamelCase : int = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
_lowerCamelCase : Optional[Any] = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
_lowerCamelCase : Any = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
_lowerCamelCase : str = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
_lowerCamelCase : Dict = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def a__ ( self ) -> Dict:
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = "toto"
_lowerCamelCase : Dict = HfArgumentParser(_lowercase )
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(_lowercase , _lowercase )
_lowerCamelCase : str = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
_lowerCamelCase : Optional[int] = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
_lowerCamelCase : Optional[int] = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
def a__ ( self ) -> Union[str, Any]:
_lowerCamelCase : int = HfArgumentParser(_lowercase )
_lowerCamelCase : Any = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_lowercase )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_lowercase )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowercase )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_lowercase )
self.argparsersEqual(_lowercase , _lowercase )
_lowerCamelCase : int = parser.parse_args([] )
self.assertEqual(
_lowercase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
_lowerCamelCase : List[Any] = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(_lowercase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def a__ ( self ) -> Optional[Any]:
_lowerCamelCase : str = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=_lowercase , type=_lowercase )
expected.add_argument('''--bar''' , default=_lowercase , type=_lowercase , help='''help message''' )
expected.add_argument('''--baz''' , default=_lowercase , type=_lowercase )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_lowercase )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_lowercase )
_lowerCamelCase : str = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowercase )
for dataclass_type in dataclass_types:
_lowerCamelCase : Union[str, Any] = HfArgumentParser(_lowercase )
self.argparsersEqual(_lowercase , _lowercase )
_lowerCamelCase : List[str] = parser.parse_args([] )
self.assertEqual(_lowercase , Namespace(foo=_lowercase , bar=_lowercase , baz=_lowercase , ces=[] , des=[] ) )
_lowerCamelCase : List[Any] = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(_lowercase , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def a__ ( self ) -> Any:
_lowerCamelCase : str = HfArgumentParser(_lowercase )
_lowerCamelCase : List[str] = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=_lowercase , required=_lowercase )
expected.add_argument('''--required_str''' , type=_lowercase , required=_lowercase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowercase , )
self.argparsersEqual(_lowercase , _lowercase )
def a__ ( self ) -> Dict:
_lowerCamelCase : Tuple = HfArgumentParser(_lowercase )
_lowerCamelCase : int = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowercase , required=_lowercase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowercase , )
expected.add_argument('''--opt''' , type=_lowercase , default=_lowercase )
expected.add_argument('''--baz''' , default='''toto''' , type=_lowercase , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowercase )
self.argparsersEqual(_lowercase , _lowercase )
def a__ ( self ) -> Union[str, Any]:
_lowerCamelCase : List[Any] = HfArgumentParser(_lowercase )
_lowerCamelCase : Union[str, Any] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
_lowerCamelCase : Optional[int] = parser.parse_dict(_lowercase )[0]
_lowerCamelCase : str = BasicExample(**_lowercase )
self.assertEqual(_lowercase , _lowercase )
def a__ ( self ) -> Optional[int]:
_lowerCamelCase : Tuple = HfArgumentParser(_lowercase )
_lowerCamelCase : Dict = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(_lowercase , parser.parse_dict , _lowercase , allow_extra_keys=_lowercase )
def a__ ( self ) -> Union[str, Any]:
_lowerCamelCase : Any = HfArgumentParser(_lowercase )
_lowerCamelCase : Dict = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : Union[str, Any] = os.path.join(_lowercase , '''temp_json''' )
os.mkdir(_lowercase )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(_lowercase , _lowercase )
_lowerCamelCase : str = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
_lowerCamelCase : Optional[int] = BasicExample(**_lowercase )
self.assertEqual(_lowercase , _lowercase )
def a__ ( self ) -> Tuple:
_lowerCamelCase : int = HfArgumentParser(_lowercase )
_lowerCamelCase : int = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : List[str] = os.path.join(_lowercase , '''temp_yaml''' )
os.mkdir(_lowercase )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(_lowercase , _lowercase )
_lowerCamelCase : Optional[int] = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
_lowerCamelCase : Tuple = BasicExample(**_lowercase )
self.assertEqual(_lowercase , _lowercase )
def a__ ( self ) -> str:
_lowerCamelCase : List[Any] = HfArgumentParser(_lowercase )
self.assertIsNotNone(_lowercase )
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase_ (_UpperCAmelCase, unittest.TestCase ):
A__ : Union[str, Any] = XLMTokenizer
A__ : Optional[Any] = False
def lowerCamelCase__ ( self ) ->str:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab_tokens = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab_tokens , range(len(vocab_tokens ) ) ) )
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , "w" ) as fp:
            fp.write("\n".join(merges ) )
def lowerCamelCase__ ( self , a_ ) ->Dict:
'''simple docstring'''
_a = "lower newer"
_a = "lower newer"
return input_text, output_text
def lowerCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
        tokenizer = XLMTokenizer(self.vocab_file , self.merges_file )
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [1_4, 1_5, 2_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@slow
def lowerCamelCase__ ( self ) ->int:
'''simple docstring'''
_a = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" )
_a = tokenizer.encode("sequence builders" , add_special_tokens=a_ )
_a = tokenizer.encode("multi-sequence build" , add_special_tokens=a_ )
_a = tokenizer.build_inputs_with_special_tokens(a_ )
_a = tokenizer.build_inputs_with_special_tokens(a_ , a_ )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
UpperCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def lowerCAmelCase ( UpperCamelCase_: str ) -> Union[str, Any]:
'''simple docstring'''
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
_a = model_type_to_module_name(UpperCamelCase_ )
_a = importlib.import_module(f'''.{module_name}''' , "transformers.models" )
try:
return getattr(UpperCamelCase_ , UpperCamelCase_ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(UpperCamelCase_ , "__name__" , UpperCamelCase_ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
_a = importlib.import_module("transformers" )
if hasattr(UpperCamelCase_ , UpperCamelCase_ ):
return getattr(UpperCamelCase_ , UpperCamelCase_ )
return None
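# A minimal usage sketch, assuming transformers is installed and that the placeholder
# assignments above stand in for IMAGE_PROCESSOR_MAPPING_NAMES, as in the upstream
# `image_processor_class_from_name` this helper mirrors (note the helper is shadowed by
# the second `lowerCAmelCase` definition below):
#
#     lowerCAmelCase("CLIPImageProcessor")  # -> <class 'transformers...CLIPImageProcessor'>
#     lowerCAmelCase("NotAProcessor")       # -> None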
def lowerCAmelCase ( UpperCamelCase_: Union[str, os.PathLike] , UpperCamelCase_: Optional[Union[str, os.PathLike]] = None , UpperCamelCase_: bool = False , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[Dict[str, str]] = None , UpperCamelCase_: Optional[Union[bool, str]] = None , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: bool = False , **UpperCamelCase_: Dict , ) -> Optional[int]:
'''simple docstring'''
_a = get_file_from_repo(
UpperCamelCase_ , UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , resume_download=UpperCamelCase_ , proxies=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , revision=UpperCamelCase_ , local_files_only=UpperCamelCase_ , )
if resolved_config_file is None:
logger.info(
"Could not locate the image processor configuration file, will try to use the model config instead." )
return {}
with open(UpperCamelCase_ , encoding="utf-8" ) as reader:
return json.load(UpperCamelCase_ )
class lowercase_ :
def __init__( self ) ->List[Any]:
'''simple docstring'''
raise EnvironmentError(
"AutoImageProcessor is designed to be instantiated "
"using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
@replace_list_option_in_docstrings(a_ )
def lowerCamelCase__ ( cls , a_ , **a_ ) ->Dict:
'''simple docstring'''
_a = kwargs.pop("config" , a_ )
_a = kwargs.pop("trust_remote_code" , a_ )
_a = True
_a , _a = ImageProcessingMixin.get_image_processor_dict(a_ , **a_ )
_a = config_dict.get("image_processor_type" , a_ )
_a = None
if "AutoImageProcessor" in config_dict.get("auto_map" , {} ):
_a = config_dict["auto_map"]["AutoImageProcessor"]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_a = config_dict.pop("feature_extractor_type" , a_ )
if feature_extractor_class is not None:
logger.warning(
"Could not find image processor class in the image processor config or the model config. Loading"
" based on pattern matching with the model's feature extractor configuration." )
_a = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" )
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
_a = config_dict["auto_map"]["AutoFeatureExtractor"]
_a = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" )
logger.warning(
"Could not find image processor auto map in the image processor config or the model config."
" Loading based on pattern matching with the model's feature extractor configuration." )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(a_ , a_ ):
_a = AutoConfig.from_pretrained(a_ , **a_ )
# It could be in `config.image_processor_type``
_a = getattr(a_ , "image_processor_type" , a_ )
if hasattr(a_ , "auto_map" ) and "AutoImageProcessor" in config.auto_map:
_a = config.auto_map["AutoImageProcessor"]
if image_processor_class is not None:
_a = image_processor_class_from_name(a_ )
_a = image_processor_auto_map is not None
_a = image_processor_class is not None or type(a_ ) in IMAGE_PROCESSOR_MAPPING
_a = resolve_trust_remote_code(
a_ , a_ , a_ , a_ )
if has_remote_code and trust_remote_code:
_a = get_class_from_dynamic_module(
a_ , a_ , **a_ )
_a = kwargs.pop("code_revision" , a_ )
if os.path.isdir(a_ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(a_ , **a_ )
elif image_processor_class is not None:
return image_processor_class.from_dict(a_ , **a_ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(a_ ) in IMAGE_PROCESSOR_MAPPING:
_a = IMAGE_PROCESSOR_MAPPING[type(a_ )]
return image_processor_class.from_dict(a_ , **a_ )
raise ValueError(
f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def lowerCamelCase__ ( a_ , a_ ) ->Optional[int]:
'''simple docstring'''
IMAGE_PROCESSOR_MAPPING.register(a_ , a_ )
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__lowerCAmelCase : Tuple =logging.get_logger(__name__)
class UpperCAmelCase ( UpperCamelCase__ ):
    def __init__( self :Union[str, Any] , *args :Optional[Any] , **kwargs :Dict )-> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
@register_to_config
    def __init__( self :List[str] , embedding_dim :int = 7_68 , )-> None:
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )
    def to ( self :List[Any] , device :Optional[Union[str, torch.device]] = None , dtype :Optional[torch.dtype] = None , )-> Any:
        self.mean = nn.Parameter(self.mean.to(device ).to(dtype ) )
        self.std = nn.Parameter(self.std.to(device ).to(dtype ) )
        return self
    def scale ( self :Optional[int] , embeds :torch.Tensor )-> torch.Tensor:
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def unscale ( self :Any , embeds :torch.Tensor )-> torch.Tensor:
        embeds = (embeds * self.std) + self.mean
        return embeds
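# `scale` and `unscale` above form a standard z-score round trip: scale maps x to
# (x - mean) / std and unscale inverts it with x * std + mean, so
# unscale(scale(x)) == x up to floating-point error.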
'''simple docstring'''
import math
import qiskit
def quantum_full_adder ( input_a : int = 1 , input_b : int = 1 , carry_in : int = 1) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_a , str)
        or isinstance(input_b , str)
        or isinstance(carry_in , str)
    ):
        raise TypeError('''inputs must be integers.''')
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''')
    if (
        (math.floor(input_a) != input_a)
        or (math.floor(input_b) != input_b)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''')
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''')
    # build registers
    qr = qiskit.QuantumRegister(4 , '''qr''')
    cr = qiskit.ClassicalRegister(2 , '''cr''')
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr)
    for i in range(0 , 3):
        if entry[i] == 2:
            quantum_circuit.h(i) # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i) # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i) # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3) # ccx = toffoli gate
    quantum_circuit.cx(0 , 1)
    quantum_circuit.ccx(1 , 2 , 3)
    quantum_circuit.cx(1 , 2)
    quantum_circuit.cx(0 , 1)
    quantum_circuit.measure([2, 3] , cr) # measure the last two qbits
    backend = qiskit.Aer.get_backend('''aer_simulator''')
    job = qiskit.execute(quantum_circuit , backend , shots=1000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
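# For definite (0/1) inputs the simulator output is deterministic: e.g.
# quantum_full_adder(1, 1, 1) puts all 1000 shots on the single key '11'
# (sum bit 1, carry-out 1), since 1 + 1 + 1 = 0b11.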
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A_ : str =16
A_ : Any =32
def snake_case_ ( __snake_case : Accelerator , __snake_case : int = 16) -> List[Any]:
lowerCAmelCase_ = AutoTokenizer.from_pretrained('''bert-base-cased''')
lowerCAmelCase_ = load_dataset('''glue''' , '''mrpc''')
def tokenize_function(__snake_case : Optional[Any]):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase_ = datasets.map(
__snake_case , batched=__snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size)
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config , args):
    # For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , None) == "1":
        config['''num_epochs'''] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            '''Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`''')
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    metric = evaluate.load('''glue''' , '''mrpc''')
    set_seed(seed)
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader) * num_epochs) , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler)
# Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions , references = accelerator.gather_for_metrics((predictions, batch['''labels''']))
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''' , eval_metric)
def main():
    parser = argparse.ArgumentParser(description='''Simple example of training script.''')
    parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    # New Code #
    parser.add_argument(
        '''--gradient_accumulation_steps''' , type=int , default=1 , help='''The number of minibatches to be run before gradients are accumulated.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''')
    args = parser.parse_args()
    config = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args)
if __name__ == "__main__":
main()
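# Back-of-the-envelope helper (an added sketch, not part of the original script):
# `accelerator.accumulate` steps the optimizer once per gradient_accumulation_steps
# micro-batches, so the effective batch size is the product below.
def effective_batch_size(per_device_batch: int, accumulation_steps: int, num_processes: int = 1) -> int:
    return per_device_batch * accumulation_steps * num_processes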
| 606
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt',
},
'tokenizer_file': {
'google/realm-cc-news-pretrained-embedder': (
            'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-openqa': (
'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-reader': (
'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-openqa': (
'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-reader': (
'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/realm-cc-news-pretrained-embedder': 5_12,
'google/realm-cc-news-pretrained-encoder': 5_12,
'google/realm-cc-news-pretrained-scorer': 5_12,
'google/realm-cc-news-pretrained-openqa': 5_12,
'google/realm-orqa-nq-openqa': 5_12,
'google/realm-orqa-nq-reader': 5_12,
'google/realm-orqa-wq-openqa': 5_12,
'google/realm-orqa-wq-reader': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/realm-cc-news-pretrained-embedder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-encoder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-scorer': {'do_lower_case': True},
'google/realm-cc-news-pretrained-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-reader': {'do_lower_case': True},
'google/realm-orqa-wq-openqa': {'do_lower_case': True},
'google/realm-orqa-wq-reader': {'do_lower_case': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def batch_encode_candidates( self , text , **kwargs ):
        """simple docstring"""
        kwargs['''padding'''] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop('''text_pair''' , None )
        return_tensors = kwargs.pop('''return_tensors''' , None )
        output_data = {
            '''input_ids''': [],
            '''attention_mask''': [],
            '''token_type_ids''': [],
        }
        for idx, candidate_text in enumerate(batch_text ):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text , candidate_text_pair , return_tensors=None , **kwargs )
            encoded_input_ids = encoded_candidates.get('''input_ids''' )
            encoded_attention_mask = encoded_candidates.get('''attention_mask''' )
            encoded_token_type_ids = encoded_candidates.get('''token_type_ids''' )
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids )
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask )
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids )
        output_data = {key: item for key, item in output_data.items() if len(item ) != 0}
        return BatchEncoding(output_data , tensor_type=return_tensors )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
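# Usage sketch for batch_encode_candidates above (an added illustration, not part
# of the original file): candidates are padded to a fixed max_length so each
# question's encodings can be stacked into one tensor.
# tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
# batch = tokenizer.batch_encode_candidates([["Hello world!", "Nice to meet you!"]], max_length=10, return_tensors="pt")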
| 217
|
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    '''simple docstring'''
    return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def main():
    '''simple docstring'''
    parser = ArgumentParser(
        '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help='''datasets-cli command helpers''' )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args , unknown_args = parser.parse_known_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
    service.run()
if __name__ == "__main__":
main()
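# Worked example for parse_unknown_args above (an added illustration, not in the
# original): flags and values are zipped pairwise, so
# parse_unknown_args(["--num_proc", "4", "--cache_dir", "/tmp"])
# returns {"num_proc": "4", "cache_dir": "/tmp"}.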
| 217
| 1
|
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level() -> int:
    env_level_str = os.getenv('DATASETS_VERBOSITY' , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                F"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
                F"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split('.' )[0]
def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name() )
def _configure_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level() )
def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET )
def get_logger(name: Optional[str] = None ) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name )
def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int ) -> None:
    _get_library_root_logger().setLevel(verbosity )
def set_verbosity_info() -> None:
    return set_verbosity(INFO )
def set_verbosity_warning() -> None:
    return set_verbosity(WARNING )
def set_verbosity_debug() -> None:
    return set_verbosity(DEBUG )
def set_verbosity_error() -> None:
    return set_verbosity(ERROR )
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):  # pylint: disable=unused-argument
        '''simple docstring'''
        self._iterator = args[0] if args else None
    def __iter__( self ):
        '''simple docstring'''
        return iter(self._iterator )
    def __getattr__( self , name ):
        '''simple docstring'''
        def empty_fn(*args , **kwargs ):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__( self ):
        '''simple docstring'''
        return self
    def __exit__( self , exc_type , exc_value , traceback ):
        '''simple docstring'''
        return
_tqdm_active = True
class _tqdm_cls:
    '''simple docstring'''
    def __call__( self , *args , disable=False , **kwargs ):
        '''simple docstring'''
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def set_lock( self , *args , **kwargs ):
        '''simple docstring'''
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock( self ):
        '''simple docstring'''
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active )
def enable_progress_bar() -> None:
    global _tqdm_active
    _tqdm_active = True
def disable_progress_bar() -> None:
    global _tqdm_active
    _tqdm_active = False
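# Minimal usage sketch of this module (an added illustration, not part of the
# original file):
# logger = get_logger(__name__)
# set_verbosity_debug()
# logger.debug("now visible")
# disable_progress_bar()  # tqdm(...) then returns the no-op EmptyTqdm wrapper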
| 710
|
def hamming(n_element: int) -> list:
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError('''n_element should be a positive number''' )
        raise my_error
    hamming_list = [1]
    i , j , k = (0, 0, 0)
    index = 1
    while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
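# Sanity check for hamming() above (an added illustration, not part of the original
# script): the first ten 5-smooth numbers.
assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]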
if __name__ == "__main__":
    n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
    print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers = hamming(int(n))
    print("""-----------------------------------------------------""")
    print(F"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 669
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    """simple docstring"""
    model_type = '''convbert'''
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , embedding_size=768 , head_ratio=2 , conv_kernel_size=9 , num_groups=1 , classifier_dropout=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    """simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
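# Illustrative round trip (an added sketch, not part of the original file):
# config = ConvBertConfig(num_hidden_layers=6)
# assert config.to_dict()["model_type"] == "convbert"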
| 74
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class ConditionalDetrConfig(PretrainedConfig):
    """simple docstring"""
    model_type = """conditional_detr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=300 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=2 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , cls_loss_coefficient=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , focal_alpha=0.25 , **kwargs , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size( self ) -> int:
        return self.d_model
    def to_dict( self ) -> dict:
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''pixel_mask''', {0: '''batch'''}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1e-5
    @property
    def default_onnx_opset( self ) -> int:
        return 12
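# Quick consistency check (an added sketch, not part of the original file): the
# attribute_map aliases defined above make these equivalent.
# config = ConditionalDetrConfig()
# assert config.hidden_size == config.d_model == 256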
| 312
| 0
|
'''simple docstring'''
def validate_initial_digits(credit_card_number: str) -> bool:
    return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') )
def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number ) - 2
    for i in range(half_len, -1, -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number ) - 1, -1, -2 ):
        total += int(cc_number[i] )
    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = F"""{credit_card_number} is an invalid credit card number because"""
if not credit_card_number.isdigit():
print(F"""{error_message} it has nonnumerical characters.""" )
return False
if not 13 <= len(lowerCAmelCase_ ) <= 16:
print(F"""{error_message} of its length.""" )
return False
if not validate_initial_digits(lowerCAmelCase_ ):
print(F"""{error_message} of its first two digits.""" )
return False
if not luhn_validation(lowerCAmelCase_ ):
print(F"""{error_message} it fails the Luhn check.""" )
return False
print(F"""{credit_card_number} is a valid credit card number.""" )
return True
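# Worked Luhn example (an added illustration, not in the original script): for
# "4111111111111111" the doubled digits contribute 8 + 7 * 2 = 22 and the untouched
# digits contribute 8, so the total is 30, a multiple of 10, and the number passes.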
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
| 709
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/reformer-crime-and-punishment': (
            'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
        )
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/reformer-crime-and-punishment': 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , eos_token="</s>" , unk_token="<unk>" , additional_special_tokens=[] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ) -> int:
        return self.sp_model.get_piece_size()
    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text: str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self , index ):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self , tokens ) -> str:
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,)
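# Usage sketch (an added illustration, not part of the original file): the tokenizer
# wraps a SentencePiece model, so a round trip goes text -> pieces -> ids -> text.
# tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
# ids = tokenizer("Crime and Punishment").input_ids
# text = tokenizer.decode(ids)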
| 421
| 0
|
"""simple docstring"""
from collections import deque
def tarjan(g ) -> list:
    n = len(g )
    stack = deque()
    on_stack = [False for _ in range(n )]
    index_of = [-1 for _ in range(n )]
    lowlink_of = index_of[:]
    def strong_connect(v , index , components ):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v )
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w , index , components )
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w )
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w )
            components.append(component )
        return index
    components = []
    for v in range(n ):
        if index_of[v] == -1:
            strong_connect(v , 0 , components )
    return components
def create_graph(n , edges ) -> list:
    g = [[] for _ in range(n )]
    for u, v in edges:
        g[u].append(v )
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
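    # Note on the expected output (an added illustration, not in the original):
    # Tarjan's algorithm emits strongly connected components in reverse topological
    # order, which is why the sink components [5] and [6] precede the cycle {0, 1, 2, 3}.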
| 560
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    '''simple docstring'''
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"
    def __init__( self , feature_extractor , tokenizer ):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer )
    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        audio = kwargs.pop('''audio''' , None )
        text = kwargs.pop('''text''' , None )
        text_target = kwargs.pop('''text_target''' , None )
        audio_target = kwargs.pop('''audio_target''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''' )
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        elif text is not None:
            inputs = self.tokenizer(text , **kwargs )
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target , *args , sampling_rate=sampling_rate , **kwargs )
            labels = targets['''input_values''']
        elif text_target is not None:
            targets = self.tokenizer(text_target , **kwargs )
            labels = targets['''input_ids''']
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs['''labels'''] = labels
            decoder_attention_mask = targets.get('''attention_mask''' )
            if decoder_attention_mask is not None:
                inputs['''decoder_attention_mask'''] = decoder_attention_mask
        return inputs
    def pad( self , *args , **kwargs ):
        """simple docstring"""
        input_values = kwargs.pop('''input_values''' , None )
        input_ids = kwargs.pop('''input_ids''' , None )
        labels = kwargs.pop('''labels''' , None )
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''' )
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values , *args , **kwargs )
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids , **kwargs )
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels , list ) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels , **kwargs )
                labels = targets['''input_ids''']
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels , *args , **kwargs )
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets['''input_values''']
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs['''labels'''] = labels
            decoder_attention_mask = targets.get('''attention_mask''' )
            if decoder_attention_mask is not None:
                inputs['''decoder_attention_mask'''] = decoder_attention_mask
        return inputs
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
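# Usage sketch (an added illustration, not part of the original file; `waveform`
# is a placeholder array): for TTS, text is the model input and the target
# spectrogram comes from `audio_target`.
# processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
# batch = processor(text="Hello", audio_target=waveform, sampling_rate=16000, return_tensors="pt")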
| 560
| 1
|
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    '''simple docstring'''
    def __get__( self , obj , objtype=None ):
        """simple docstring"""
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute" )
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj , attr , None )
        if cached is None:
            cached = self.fget(obj )
            setattr(obj , attr , cached )
        return cached
def strtobool(val ):
    '''simple docstring'''
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}" )
def is_tensor(x ):
    '''simple docstring'''
    if is_torch_fx_proxy(x ):
        return True
    if is_torch_available():
        import torch
        if isinstance(x , torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf
        if isinstance(x , tf.Tensor ):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer
        if isinstance(x , (jnp.ndarray, Tracer) ):
            return True
    return isinstance(x , np.ndarray )
def _is_numpy(x ):
    '''simple docstring'''
    return isinstance(x , np.ndarray )
def is_numpy_array(x ):
    '''simple docstring'''
    return _is_numpy(x )
def _is_torch(x ):
    '''simple docstring'''
    import torch
    return isinstance(x , torch.Tensor )
def is_torch_tensor(x ):
    '''simple docstring'''
    return False if not is_torch_available() else _is_torch(x )
def _is_torch_device(x ):
    '''simple docstring'''
    import torch
    return isinstance(x , torch.device )
def is_torch_device(x ):
    '''simple docstring'''
    return False if not is_torch_available() else _is_torch_device(x )
def _is_torch_dtype(x ):
    '''simple docstring'''
    import torch
    if isinstance(x , str ):
        if hasattr(torch , x ):
            x = getattr(torch , x )
        else:
            return False
    return isinstance(x , torch.dtype )
def is_torch_dtype(x ):
    '''simple docstring'''
    return False if not is_torch_available() else _is_torch_dtype(x )
def _is_tensorflow(x ):
    '''simple docstring'''
    import tensorflow as tf
    return isinstance(x , tf.Tensor )
def is_tf_tensor(x ):
    '''simple docstring'''
    return False if not is_tf_available() else _is_tensorflow(x )
def _is_tf_symbolic_tensor(x ):
    '''simple docstring'''
    import tensorflow as tf
    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf , "is_symbolic_tensor" ):
        return tf.is_symbolic_tensor(x )
    return type(x ) == tf.Tensor
def is_tf_symbolic_tensor(x ):
    '''simple docstring'''
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x )
def _is_jax(x ):
    '''simple docstring'''
    import jax.numpy as jnp  # noqa: F811
    return isinstance(x , jnp.ndarray )
def is_jax_tensor(x ):
    '''simple docstring'''
    return False if not is_flax_available() else _is_jax(x )
def to_py_obj(obj ):
    '''simple docstring'''
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_py_obj(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return [to_py_obj(o ) for o in obj]
    elif is_tf_tensor(obj ):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj ):
        return np.asarray(obj ).tolist()
    elif isinstance(obj , (np.ndarray, np.number) ):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def to_numpy(obj ):
    '''simple docstring'''
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_numpy(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return np.array(obj )
    elif is_tf_tensor(obj ):
        return obj.numpy()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj ):
        return np.asarray(obj )
    else:
        return obj
class ModelOutput(OrderedDict):
    '''simple docstring'''
    def __post_init__( self ):
        """simple docstring"""
        class_fields = fields(self )
        # Safety and consistency checks
        if not len(class_fields ):
            raise ValueError(f"{self.__class__.__name__} has no fields." )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field." )
        first_field = getattr(self , class_fields[0].name )
        other_fields_are_none = all(getattr(self , field.name ) is None for field in class_fields[1:] )
        if other_fields_are_none and not is_tensor(first_field ):
            if isinstance(first_field , dict ):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field )
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator ):
                    if (
                        not isinstance(element , (list, tuple) )
                        or not len(element ) == 2
                        or not isinstance(element[0] , str )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)." )
                        break
                    setattr(self , element[0] , element[1] )
                    if element[1] is not None:
                        self[element[0]] = element[1]
        elif first_field is not None:
            self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self , field.name )
                if v is not None:
                    self[field.name] = v
    def __delitem__( self , *args , **kwargs ):
        """simple docstring"""
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance." )
    def setdefault( self , *args , **kwargs ):
        """simple docstring"""
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance." )
    def pop( self , *args , **kwargs ):
        """simple docstring"""
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance." )
    def update( self , *args , **kwargs ):
        """simple docstring"""
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance." )
    def __getitem__( self , k ):
        """simple docstring"""
        if isinstance(k , str ):
            inner_dict = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__( self , name , value ):
        """simple docstring"""
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name , value )
        super().__setattr__(name , value )
    def __setitem__( self , key , value ):
        """simple docstring"""
        super().__setitem__(key , value )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key , value )
    def to_tuple( self ):
        """simple docstring"""
        return tuple(self[k] for k in self.keys() )
class ExplicitEnum(str , Enum):
    '''simple docstring'''
    @classmethod
    def _missing_( cls , value ):
        """simple docstring"""
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}" )
class PaddingStrategy(ExplicitEnum):
    '''simple docstring'''
    LONGEST = '''longest'''
    MAX_LENGTH = '''max_length'''
    DO_NOT_PAD = '''do_not_pad'''
class TensorType(ExplicitEnum):
    '''simple docstring'''
    PYTORCH = '''pt'''
    TENSORFLOW = '''tf'''
    NUMPY = '''np'''
    JAX = '''jax'''
class ContextManagers:
    '''simple docstring'''
    def __init__( self , context_managers: List[ContextManager] ):
        """simple docstring"""
        self.context_managers = context_managers
        self.stack = ExitStack()
    def __enter__( self ):
        """simple docstring"""
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager )
    def __exit__( self , *args , **kwargs ):
        """simple docstring"""
        self.stack.__exit__(*args , **kwargs )
def can_return_loss(model_class ):
    '''simple docstring'''
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels(model_class ):
    '''simple docstring'''
    model_name = model_class.__name__
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d , parent_key = "" , delimiter = "." ):
    '''simple docstring'''
    def _flatten_dict(d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , MutableMapping ):
                yield from flatten_dict(v , key , delimiter=delimiter ).items()
            else:
                yield key, v
    return dict(_flatten_dict(d , parent_key , delimiter ) )
@contextmanager
def working_or_temp_dir(working_dir , use_temp_dir = False ):
'''simple docstring'''
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def transpose(array , axes=None ):
    '''simple docstring'''
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(f"Type not supported for transpose: {type(array )}." )
def reshape(array , newshape ):
    '''simple docstring'''
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(f"Type not supported for reshape: {type(array )}." )
def squeeze(array , axis=None ):
    '''simple docstring'''
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array )}." )
def expand_dims(array , axis ):
    '''simple docstring'''
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array )}." )
def tensor_size(array ):
    '''simple docstring'''
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array )}." )
def add_model_info_to_auto_map(auto_map , repo_id ):
    '''simple docstring'''
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map
def infer_framework(model_class ):
    '''simple docstring'''
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch" ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}." )
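# Usage sketch for ModelOutput above (an added illustration, not part of the
# original file): subclasses behave as both tuples and dicts, and fields left as
# None are skipped.
# from dataclasses import dataclass
# from typing import Optional
# @dataclass
# class MyOutput(ModelOutput):
#     loss: Optional[float] = None
#     logits: Optional[np.ndarray] = None
# out = MyOutput(logits=np.array([1.0]))
# assert out["logits"] is out.logits and out.to_tuple() == (out.logits,)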
| 139
|
from __future__ import annotations
def solve_maze(maze ) -> bool:
    '''simple docstring'''
    size = len(maze )
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    solved = run_maze(maze , 0 , 0 , solutions )
    if solved:
        print("\n".join(str(row ) for row in solutions ) )
    else:
        print("No solution exists!" )
    return solved
def run_maze(maze , i , j , solutions ) -> bool:
    '''simple docstring'''
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
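# Tiny worked example (an added illustration, not in the original): 0 marks an open
# cell and 1 a wall; the solver prints the visited path as 1s in the solution grid.
# solve_maze([[0, 1], [0, 0]])  # prints [1, 0] / [1, 1] and returns True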
if __name__ == "__main__":
import doctest
doctest.testmod()
| 139
| 1
|
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel , ckpt_dir: str , model_name: str ):
    """simple docstring"""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict = model.state_dict()
    def to_tf_var_name(name ):
        for patt, repl in iter(var_map ):
            name = name.replace(patt , repl )
        return F"bert/{name}"
    def create_tf_var(tensor , name , session ):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
            tf.keras.backend.set_value(tf_var , torch_tensor )
            tf_weight = session.run(tf_var )
            print(F"Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor )}" )
        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session , os.path.join(ckpt_dir , model_name.replace("-" , "_" ) + ".ckpt" ) )
def main(raw_args=None ):
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=str , required=True , help="model name e.g. bert-base-uncased" )
    parser.add_argument(
        "--cache_dir" , type=str , default=None , required=False , help="Directory containing pytorch model" )
    parser.add_argument("--pytorch_model_path" , type=str , required=True , help="/path/to/<pytorch-model-name>.bin" )
    parser.add_argument("--tf_cache_dir" , type=str , required=True , help="Directory in which to save tensorflow model" )
    args = parser.parse_args(raw_args )
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
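# Example invocation (an added illustration; the script name and paths are
# placeholders, not from the original):
# python convert_bert_pytorch_checkpoint_to_original_tf.py \
#     --model_name bert-base-uncased \
#     --pytorch_model_path /path/to/pytorch_model.bin \
#     --tf_cache_dir /path/to/tf_out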
| 554
|
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin , unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31_414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2],
        )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
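
# A hedged sketch of the offset-mapping behaviour the last test pins down,
# using only the public API; "hello hello" stands in for any pair of
# single-token words.
#
#     from transformers import LongformerTokenizerFast
#
#     tok = LongformerTokenizerFast.from_pretrained(
#         "allenai/longformer-base-4096", add_prefix_space=True, trim_offsets=True
#     )
#     enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
#     print(enc.offset_mapping)  # trim_offsets=True excludes the separating space: [(0, 5), (6, 11)]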
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    # A classic ResNet residual layer composed of two 3x3 convolutions.
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    # A classic ResNet bottleneck layer: the first 1x1 convolution reduces the input by
    # `reduction`, the last 1x1 convolution remaps the reduced features to `out_channels`.
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    # A ResNet stage composed of stacked layers.
    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )


class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
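
# A hedged usage sketch for the classification head above; the checkpoint is
# the one named in the docstrings, the image file is a placeholder.
#
#     from PIL import Image
#     import torch
#     from transformers import AutoImageProcessor, ResNetForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#
#     image = Image.open("cat.png")  # placeholder image
#     inputs = processor(images=image, return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])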
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
    AutoencoderKL,
    EulerDiscreteScheduler,
    StableDiffusionLatentUpscalePipeline,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list) -> bool:
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47_222_412, 0.41_921_633, 0.44_717_434, 0.46_874_192, 0.42_588_258, 0.46_150_726, 0.4_677_534, 0.45_583_832, 0.48_579_055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
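
# A hedged sketch of the two-stage flow the integration tests above exercise:
# generate latents with a base pipeline, then upscale them 2x with the latent upscaler.
#
#     import torch
#     from diffusers import StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline
#
#     pipe = StableDiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
#     ).to("cuda")
#     upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
#         "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
#     ).to("cuda")
#
#     prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
#     low_res_latents = pipe(prompt, output_type="latent").images
#     image = upscaler(prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0).images[0]
#     image.save("astronaut_1024.png")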
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


def html_progress_bar(value, total, prefix, label, width=300):
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"    <th>{i}</th>\n"
    html_code += "  </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(self, total, prefix=None, leave=True, parent=None, width=300):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update(self, value, force_update=False, comment=None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break

            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
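
# A hedged sketch of wiring the callback above into a Trainer by hand (it is
# normally picked up automatically in notebooks); the model and datasets are
# assumed to exist and are placeholders here.
#
#     from transformers import Trainer, TrainingArguments
#
#     trainer = Trainer(
#         model=model,  # assumed to exist
#         args=TrainingArguments(output_dir="out", evaluation_strategy="epoch"),
#         train_dataset=train_dataset,  # assumed to exist
#         eval_dataset=eval_dataset,  # assumed to exist
#         callbacks=[NotebookProgressCallback()],
#     )
#     trainer.train()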
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
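
# Hedged launch sketch: this script is meant to be started through the
# `accelerate` CLI with a DeepSpeed config; the config file name below is a
# placeholder, not a file shipped with the script.
#
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 --output_dir ./results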
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        # Number of forward references from this node.
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]
    def random_level(self) -> int:
        # Random level from [1, self.max_level]; higher values are less likely.
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key):
        # Returns the node holding `key` (or None) and the list of nodes that
        # refer (or should refer) to it.
        update_vector = []

        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
def a__ ( self : Any , _lowerCamelCase : KT ):
_UpperCAmelCase ,_UpperCAmelCase : Dict = self._locate_node(_lowerCamelCase )
if node is not None:
for i, update_node in enumerate(_lowerCamelCase ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
_UpperCAmelCase : List[str] = node.forward[i]
else:
_UpperCAmelCase : str = update_node.forward[:i]
    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After a level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through the new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT):
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
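

# Added usage sketch (not part of the original file): exercises insert, find and
# delete on the skip list above, assuming the surrounding class is named SkipList
# as the tests below imply.
def _skip_list_usage_sketch() -> None:
    sl = SkipList()
    sl.insert("a", 1)
    sl.insert("b", 2)
    assert sl.find("a") == 1
    sl.delete("a")
    assert sl.find("a") is None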
def test_insert() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none() -> None:
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search() -> None:
    skip_list = SkipList()
    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing() -> None:
    skip_list = SkipList()
    skip_list.delete("Some key")
    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None
def test_delete_removes_only_given_key() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)
    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values() -> None:
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests() -> None:
    # Repeat the tests 100 times due to the probabilistic nature of the skip list:
    # random values == random bugs
    for _ in range(100):
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()
def main() -> None:
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")
    skip_list.delete(4)
    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 328
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
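

# Added illustration (not part of the original script): a fused qkv projection of
# shape (3*h, h) splits into equal thirds for query, key and value; h=4 is an
# arbitrary toy value.
def _qkv_split_sketch():
    h = 4
    qkv = torch.arange(3 * h * h, dtype=torch.float32).reshape(3 * h, h)
    q, k, v = qkv[:h, :], qkv[h : 2 * h, :], qkv[-h:, :]
    assert q.shape == k.shape == v.shape == (h, h)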
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # The projection head is used during self-supervised pre-training in MSN
    # and is not needed for downstream tasks.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
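

# Added example (not part of the original script): applying a single rename pair
# to a toy state dict; the key names mirror the patterns built in
# create_rename_keys above.
def _rename_key_sketch():
    toy = {"module.blocks.0.norm1.weight": 0}
    rename_key(toy, "module.blocks.0.norm1.weight", "vit.encoder.layer.0.layernorm_before.weight")
    assert "vit.encoder.layer.0.layernorm_before.weight" in toy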
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 328
| 1
|
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ",
            )
    return int(stack[0])
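

# Added example (not part of the original file): "5 6 9 * +" evaluates to
# 5 + (6 * 9) = 59; solve() also prints a trace table of the stack operations.
def _solve_sketch() -> None:
    assert solve("5 6 9 * +".split(" ")) == 59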
if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
| 62
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPixaPixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase__ ( self : List[Any] ):
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
lowerCamelCase_ = PNDMScheduler(skip_prk_steps=__UpperCamelCase )
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
lowerCamelCase_ = CLIPTextModel(__UpperCamelCase )
lowerCamelCase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowercase__ ( self : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any=0 ):
lowerCamelCase_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert("""RGB""" )
if str(__UpperCamelCase ).startswith("""mps""" ):
lowerCamelCase_ = torch.manual_seed(__UpperCamelCase )
else:
lowerCamelCase_ = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
lowerCamelCase_ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def lowercase__ ( self : Dict ):
lowerCamelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
lowerCamelCase_ = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
lowerCamelCase_ = sd_pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : Dict ):
lowerCamelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
lowerCamelCase_ = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
lowerCamelCase_ = """french fries"""
lowerCamelCase_ = sd_pipe(**__UpperCamelCase , negative_prompt=__UpperCamelCase )
lowerCamelCase_ = output.images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : Optional[int] ):
lowerCamelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
lowerCamelCase_ = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
lowerCamelCase_ = [inputs["""prompt"""]] * 2
lowerCamelCase_ = np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0
lowerCamelCase_ = torch.from_numpy(__UpperCamelCase ).unsqueeze(0 ).to(__UpperCamelCase )
lowerCamelCase_ = image / 2 + 0.5
lowerCamelCase_ = image.permute(0 , 3 , 1 , 2 )
lowerCamelCase_ = image.repeat(2 , 1 , 1 , 1 )
lowerCamelCase_ = sd_pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
lowerCamelCase_ = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : Dict ):
lowerCamelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
lowerCamelCase_ = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
lowerCamelCase_ = sd_pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
lowerCamelCase_ = [round(__UpperCamelCase , 4 ) for x in image_slice.flatten().tolist()]
print(""",""".join([str(__UpperCamelCase ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : str ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowercase__ ( self : Optional[int] ):
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
lowerCamelCase_ = VaeImageProcessor(do_resize=__UpperCamelCase , do_normalize=__UpperCamelCase )
lowerCamelCase_ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCamelCase_ = pipe(**self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type="""pt""" ) )[0]
lowerCamelCase_ = components["""vae"""]
lowerCamelCase_ = self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowerCamelCase_ = vae.encode(inputs[image_param] ).latent_dist.mode()
lowerCamelCase_ = pipe(**__UpperCamelCase )[0]
lowerCamelCase_ = np.abs(out - out_latents_inputs ).max()
self.assertLess(__UpperCamelCase , 1E-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class StableDiffusionInstructPixaPixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : List[str] , __UpperCamelCase : Union[str, Any]=0 ):
lowerCamelCase_ = torch.manual_seed(__UpperCamelCase )
lowerCamelCase_ = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
lowerCamelCase_ = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def lowercase__ ( self : Union[str, Any] ):
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase_ = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowercase__ ( self : List[str] ):
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase )
lowerCamelCase_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase_ = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowercase__ ( self : List[Any] ):
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase )
lowerCamelCase_ = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase_ = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowercase__ ( self : List[str] ):
lowerCamelCase_ = 0
def callback_fn(__UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : torch.FloatTensor ) -> None:
lowerCamelCase_ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCamelCase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
lowerCamelCase_ = latents[0, -3:, -3:, -1]
lowerCamelCase_ = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowerCamelCase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
lowerCamelCase_ = latents[0, -3:, -3:, -1]
lowerCamelCase_ = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowerCamelCase_ = False
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
lowerCamelCase_ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ = self.get_inputs()
pipe(**__UpperCamelCase , callback=__UpperCamelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase__ ( self : int ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
lowerCamelCase_ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**__UpperCamelCase )
lowerCamelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def lowercase__ ( self : Tuple ):
lowerCamelCase_ = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase_ = inputs["""image"""].resize((5_0_4, 5_0_4) )
lowerCamelCase_ = """timbrooks/instruct-pix2pix"""
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__UpperCamelCase , safety_checker=__UpperCamelCase , )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ = pipe(**__UpperCamelCase )
lowerCamelCase_ = output.images[0]
lowerCamelCase_ = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
lowerCamelCase_ = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 272
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
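
# Usage note (added): assigning a _LazyModule to sys.modules[__name__] defers the
# heavy torch/vision imports until an attribute is first accessed, e.g.
#   from transformers.models.layoutlmv2 import LayoutLMv2Config  # triggers the lazy load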
| 548
|
def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
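

# Added worked example (not part of the original file): R(6) = 111111 = 7 * 15873
# is the smallest repunit divisible by 7, so least_divisible_repunit(7) == 6.
def _repunit_sketch() -> None:
    assert least_divisible_repunit(7) == 6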
if __name__ == "__main__":
print(f"""{solution() = }""")
| 548
| 1
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 17
|
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"
class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
return name
raise RuntimeError(
'No hyperparameter search backend available.\n'
+ '\n'.join(
F' - To install {backend.name} run {backend.pip_install()}'
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
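

# Added usage sketch (not part of the original module): resolve the default
# backend and verify its package is installed; raises if no backend is available.
def _backend_resolution_sketch() -> None:
    backend_cls = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(default_hp_search_backend())]
    backend_cls().ensure_available()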
| 128
| 0
|
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowercase =logging.get_logger(__name__)
def normalize_box(box, width, height):
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
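

# Added example (not part of the original file): a (10, 20, 30, 40) box on a
# 200x100 image maps onto the 0-1000 grid LayoutLM expects.
def _normalize_box_sketch() -> None:
    assert normalize_box([10, 20, 30, 40], 200, 100) == [50, 200, 150, 400]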
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase =["pixel_values"]
def __init__( self , snake_case = True , snake_case = None , snake_case = PILImageResampling.BILINEAR , snake_case = True , snake_case = None , snake_case = "" , **snake_case , ) -> None:
'''simple docstring'''
super().__init__(**snake_case)
_UpperCAmelCase : List[str] =size if size is not None else {'height': 2_2_4, 'width': 2_2_4}
_UpperCAmelCase : Any =get_size_dict(snake_case)
_UpperCAmelCase : str =do_resize
_UpperCAmelCase : Tuple =size
_UpperCAmelCase : int =resample
_UpperCAmelCase : Any =apply_ocr
_UpperCAmelCase : Any =ocr_lang
_UpperCAmelCase : Tuple =tesseract_config
def lowerCAmelCase ( self , snake_case , snake_case , snake_case = PILImageResampling.BILINEAR , snake_case = None , **snake_case , ) -> np.ndarray:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] =get_size_dict(snake_case)
if "height" not in size or "width" not in size:
raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
_UpperCAmelCase : Optional[Any] =(size['height'], size['width'])
return resize(snake_case , size=snake_case , resample=snake_case , data_format=snake_case , **snake_case)
def lowerCAmelCase ( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , **snake_case , ) -> PIL.Image.Image:
'''simple docstring'''
_UpperCAmelCase : Dict =do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase : Tuple =size if size is not None else self.size
_UpperCAmelCase : str =get_size_dict(snake_case)
_UpperCAmelCase : Dict =resample if resample is not None else self.resample
_UpperCAmelCase : Optional[int] =apply_ocr if apply_ocr is not None else self.apply_ocr
_UpperCAmelCase : int =ocr_lang if ocr_lang is not None else self.ocr_lang
_UpperCAmelCase : Optional[Any] =tesseract_config if tesseract_config is not None else self.tesseract_config
_UpperCAmelCase : Tuple =make_list_of_images(snake_case)
if not valid_images(snake_case):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
# All transformations expect numpy arrays.
_UpperCAmelCase : List[Any] =[to_numpy_array(snake_case) for image in images]
if apply_ocr:
requires_backends(self , 'pytesseract')
_UpperCAmelCase : Optional[int] =[]
_UpperCAmelCase : List[Any] =[]
for image in images:
_UpperCAmelCase : str =apply_tesseract(snake_case , snake_case , snake_case)
words_batch.append(snake_case)
boxes_batch.append(snake_case)
if do_resize:
_UpperCAmelCase : Optional[Any] =[self.resize(image=snake_case , size=snake_case , resample=snake_case) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
_UpperCAmelCase : Union[str, Any] =[flip_channel_order(snake_case) for image in images]
_UpperCAmelCase : int =[to_channel_dimension_format(snake_case , snake_case) for image in images]
_UpperCAmelCase : str =BatchFeature(data={'pixel_values': images} , tensor_type=snake_case)
if apply_ocr:
_UpperCAmelCase : Dict =words_batch
_UpperCAmelCase : Optional[int] =boxes_batch
return data
| 703
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
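

# Added example (not part of the original script): duplicate "local" entries
# collapse to a single entry and the overview doc always floats to the front.
def _clean_doc_toc_sketch() -> None:
    docs = [
        {"local": "a", "title": "Overview"},
        {"local": "b", "title": "B"},
        {"local": "b", "title": "B"},
    ]
    cleaned = clean_doc_toc(docs)
    assert len(cleaned) == 2
    assert cleaned[0]["title"] == "Overview"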
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 331
| 0
|
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImgaImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
def lowerCAmelCase__ ( self: Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =self.get_dummy_inputs()
UpperCAmelCase_ =pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ =np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowerCAmelCase__ ( self: Dict ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase_ =PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =self.get_dummy_inputs()
UpperCAmelCase_ =pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ =np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCAmelCase__ ( self: int ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase_ =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
# warmup pass to apply optimizations
UpperCAmelCase_ =pipe(**self.get_dummy_inputs() )
UpperCAmelCase_ =self.get_dummy_inputs()
UpperCAmelCase_ =pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ =np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCAmelCase__ ( self: List[str] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase_ =EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =self.get_dummy_inputs()
UpperCAmelCase_ =pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ =np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCAmelCase__ ( self: str ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase_ =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =self.get_dummy_inputs()
UpperCAmelCase_ =pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ =np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCAmelCase__ ( self: Optional[int] ) -> str:
'''simple docstring'''
UpperCAmelCase_ =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase_ =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =self.get_dummy_inputs()
UpperCAmelCase_ =pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ =np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class A ( unittest.TestCase ):
@property
def lowerCAmelCase__ ( self: List[str] ) -> Dict:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self: List[str] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =ort.SessionOptions()
UpperCAmelCase_ =False
return options
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
UpperCAmelCase_ =init_image.resize((768, 512) )
# using the PNDM scheduler by default
UpperCAmelCase_ =OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ ="A fantasy landscape, trending on artstation"
UpperCAmelCase_ =np.random.RandomState(0 )
UpperCAmelCase_ =pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="np" , )
UpperCAmelCase_ =output.images
UpperCAmelCase_ =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
UpperCAmelCase_ =np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowerCAmelCase__ ( self: str ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
UpperCAmelCase_ =init_image.resize((768, 512) )
UpperCAmelCase_ =LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
UpperCAmelCase_ =OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ ="A fantasy landscape, trending on artstation"
UpperCAmelCase_ =np.random.RandomState(0 )
UpperCAmelCase_ =pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowerCAmelCase , output_type="np" , )
UpperCAmelCase_ =output.images
UpperCAmelCase_ =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
UpperCAmelCase_ =np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
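# Minimal CPU usage sketch (illustrative only; the checkpoint id and inputs are
# assumptions, mirroring the nightly test above):
#   pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider")
#   out = pipe(prompt="A fantasy landscape", image=init_image, strength=0.75,
#              num_inference_steps=10, output_type="np")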
| 54
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def a__ ( lowercase__ , lowercase__ , lowercase__=1_0_2_4 , lowercase__=1_0_2_4 , lowercase__=False , **lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =AutoTokenizer.from_pretrained(lowercase__ )
UpperCAmelCase_ =SeqaSeqDataset(lowercase__ , lowercase__ , lowercase__ , lowercase__ , type_path="train" , **lowercase__ )
UpperCAmelCase_ =tok.pad_token_id
def get_lens(lowercase__ ):
UpperCAmelCase_ =tqdm(
DataLoader(lowercase__ , batch_size=5_1_2 , num_workers=8 , shuffle=lowercase__ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
UpperCAmelCase_ =[]
for batch in dl:
UpperCAmelCase_ =batch["input_ids"].ne(lowercase__ ).sum(1 ).tolist()
UpperCAmelCase_ =batch["labels"].ne(lowercase__ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowercase__ , lowercase__ ):
max_lens.append(max(lowercase__ , lowercase__ ) )
else:
max_lens.extend(lowercase__ )
return max_lens
UpperCAmelCase_ =get_lens(lowercase__ )
UpperCAmelCase_ =SeqaSeqDataset(lowercase__ , lowercase__ , lowercase__ , lowercase__ , type_path="val" , **lowercase__ )
UpperCAmelCase_ =get_lens(lowercase__ )
pickle_save(lowercase__ , train_ds.len_file )
pickle_save(lowercase__ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
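# Usage sketch (hypothetical paths/arguments; `fire` exposes the function's
# positional args on the CLI). The script caches per-example max token lengths
# to the datasets' `.len` files so a length-grouped sampler can reuse them:
#   python save_len_file.py t5-small ./cnn_dm --max_source_length 1024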
| 54
| 1
|
"""simple docstring"""
import copy
import re
class lowerCAmelCase_ :
'''simple docstring'''
_lowerCamelCase: Tuple = '''hp'''
_lowerCamelCase: Optional[Any] = {}
_lowerCamelCase: Optional[Any] = None
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,A_ : Union[str, Any] ,A_ : Dict ) -> Union[str, Any]:
A = prefix
A = defaults
cls.build_naming_info()
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : Optional[Any] ,A_ : Optional[int] ) -> List[Any]:
if len(A_ ) == 0:
return ""
A = None
if any(char.isdigit() for char in word ):
raise Exception(F'Parameters should not contain numbers: \'{word}\' contains a number' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 ,len(A_ ) + 1 ):
A = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
A = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(A_ : int ):
A = ''
while integer != 0:
A = chr(ord('A' ) + integer % 10 ) + s
integer //= 10
return s
A = 0
while True:
A = word + '#' + int_to_alphabetic(A_ )
if sword in info["reverse_short_word"]:
continue
else:
A = sword
break
A = short_word
A = word
return short_word
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : Any ,A_ : Any ) -> List[str]:
A = param_name.split('_' )
A = [TrialShortNamer.shortname_for_word(A_ ,A_ ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to
# fall back to a separated short name
A = ['', '_']
for separator in separators:
A = separator.join(A_ )
if shortname not in info["reverse_short_param"]:
A = shortname
A = param_name
return shortname
return param_name
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : List[Any] ) -> Dict:
A = TrialShortNamer.shortname_for_key(A_ ,A_ )
A = short_name
A = param_name
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ) -> Union[str, Any]:
if cls.NAMING_INFO is not None:
return
A = {
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
A = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(A_ ,A_ )
A = info
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,A_ : List[str] ) -> Optional[int]:
cls.build_naming_info()
assert cls.PREFIX is not None
A = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F'You should provide a default value for the param name {k} with value {v}' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
A = cls.NAMING_INFO['short_param'][k]
if isinstance(A_ ,A_ ):
A = 1 if v else 0
A = '' if isinstance(A_ ,(int, float) ) else '-'
A = F'{key}{sep}{v}'
name.append(A_ )
return "_".join(A_ )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,A_ : List[Any] ) -> Tuple:
A = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
A = []
else:
A = repr.split('_' )
A = {}
for value in values:
if "-" in value:
A , A = value.split('-' )
else:
A = re.sub('[0-9.]' ,'' ,A_ )
A = float(re.sub('[^0-9.]' ,'' ,A_ ) )
A = cls.NAMING_INFO['reverse_short_param'][p_k]
A = p_v
for k in cls.DEFAULTS:
if k not in parameters:
A = cls.DEFAULTS[k]
return parameters
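# Round-trip sketch for the logic above (method names follow the upstream
# `TrialShortNamer` API; the subclass and its fields are assumptions):
#   class RunNamer(TrialShortNamer):
#       PREFIX = "run"
#       DEFAULTS = {"learning_rate": 1e-3, "batch_size": 8}
#   RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 8})  # -> "run_lr0.0001"
#   RunNamer.parse_repr("run_lr0.0001")  # -> {"learning_rate": 0.0001, "batch_size": 8}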
| 22
|
"""simple docstring"""
def _snake_case ( snake_case__ : list , snake_case__ : list , snake_case__ : int ):
A = len(snake_case__ )
A = [[0] * n for i in range(snake_case__ )]
for i in range(snake_case__ ):
A = y_points[i]
for i in range(2 , snake_case__ ):
for j in range(snake_case__ , snake_case__ ):
A = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
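# Worked example (assumed inputs) for the Neville-style interpolation above:
# with samples of y = x**2 at x = 1..4, evaluating at x0 = 5 reproduces the
# quadratic exactly.
#   value, table = _snake_case([1, 2, 3, 4], [1, 4, 9, 16], 5)
#   # value == 25.0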
| 22
| 1
|
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def snake_case__ ( ) -> List[str]:
'''simple docstring'''
lowerCAmelCase = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=__SCREAMING_SNAKE_CASE , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=__SCREAMING_SNAKE_CASE , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=__SCREAMING_SNAKE_CASE )
return parser.parse_args()
def snake_case__ ( ) -> Dict:
'''simple docstring'''
lowerCAmelCase = parse_args()
# Import training_script as a module.
lowerCAmelCase = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowerCAmelCase = script_fpath.stem
lowerCAmelCase = importlib.import_module(__SCREAMING_SNAKE_CASE )
# Patch sys.argv
lowerCAmelCase = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
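# Example invocation (hypothetical script/paths): run a training script on
# 8 TPU cores; everything after the script path is forwarded to it, plus an
# injected --tpu_num_cores flag.
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...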
| 370
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class lowerCAmelCase_ ( snake_case__ , snake_case__ ):
"""simple docstring"""
a_ :Tuple ="""resnet"""
a_ :int =["""basic""", """bottleneck"""]
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : List[str]=3 , SCREAMING_SNAKE_CASE__ : Tuple=6_4 , SCREAMING_SNAKE_CASE__ : Dict=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , SCREAMING_SNAKE_CASE__ : Optional[int]=[3, 4, 6, 3] , SCREAMING_SNAKE_CASE__ : Optional[Any]="bottleneck" , SCREAMING_SNAKE_CASE__ : str="relu" , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Tuple=None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE__ )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
__a = num_channels
__a = embedding_size
__a = hidden_sizes
__a = depths
__a = layer_type
__a = hidden_act
__a = downsample_in_first_stage
__a = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(SCREAMING_SNAKE_CASE__ ) + 1 )]
__a , __a = get_aligned_output_features_output_indices(
out_features=SCREAMING_SNAKE_CASE__ , out_indices=SCREAMING_SNAKE_CASE__ , stage_names=self.stage_names )
class lowerCAmelCase_ ( snake_case__ ):
"""simple docstring"""
a_ :Any =version.parse("""1.11""" )
@property
def __a ( self : List[Any] ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __a ( self : Optional[int] ):
'''simple docstring'''
return 1E-3
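# Usage sketch (checkpoint id assumed): the ONNX config above declares a single
# `pixel_values` input with a dynamic batch axis and a 1e-3 validation
# tolerance for the exported graph.
#   config = <the config class above>.from_pretrained("microsoft/resnet-50")
#   onnx_config = <the OnnxConfig subclass above>(config)
#   list(onnx_config.inputs)  # ["pixel_values"]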
| 582
| 0
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( __lowercase : float , __lowercase : float , __lowercase : float ) -> dict[str, float]:
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
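# Worked example: pass 0 for the quantity to solve for. With 10 V across a
# 5 ohm resistor:
#   UpperCAmelCase_(10, 0, 5)  # -> {'current': 2.0}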
| 119
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE :Optional[int] = logging.get_logger()
@dataclass
class A_ :
_lowerCamelCase : nn.Module
_lowerCamelCase : List[nn.Module] = field(default_factory=lowerCAmelCase_ )
_lowerCamelCase : list = field(default_factory=lowerCAmelCase_ )
def lowercase ( self : Dict , snake_case_ : Dict , snake_case_ : Tensor , snake_case_ : Tensor ):
_UpperCAmelCase = len(list(m.modules() ) ) == 1 or isinstance(snake_case_ , nn.Convad ) or isinstance(snake_case_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(snake_case_ )
def __call__( self : str , snake_case_ : Tensor ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case_ )
[x.remove() for x in self.handles]
return self
@property
def lowercase ( self : int ):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda snake_case_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class A_ :
_lowerCamelCase : nn.Module
_lowerCamelCase : nn.Module
_lowerCamelCase : int = 1
_lowerCamelCase : List = field(default_factory=lowerCAmelCase_ )
_lowerCamelCase : List = field(default_factory=lowerCAmelCase_ )
_lowerCamelCase : bool = True
def __call__( self : str , snake_case_ : Tensor ):
_UpperCAmelCase = Tracker(self.dest )(snake_case_ ).parametrized
_UpperCAmelCase = Tracker(self.src )(snake_case_ ).parametrized
_UpperCAmelCase = list(filter(lambda snake_case_ : type(snake_case_ ) not in self.src_skip , snake_case_ ) )
_UpperCAmelCase = list(filter(lambda snake_case_ : type(snake_case_ ) not in self.dest_skip , snake_case_ ) )
if len(snake_case_ ) != len(snake_case_ ) and self.raise_if_mismatch:
raise Exception(
f'Numbers of operations are different. Source module has {len(snake_case_ )} operations while'
f' destination module has {len(snake_case_ )}.' )
for dest_m, src_m in zip(snake_case_ , snake_case_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f'Transferred from={src_m} to={dest_m}' )
class A_ ( nn.Module ):
def __init__( self : str , snake_case_ : nn.Module ):
super().__init__()
_UpperCAmelCase = []
# - get the stem
feature_blocks.append(("conv1", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("block" ), f'Unexpected layer name {k}'
_UpperCAmelCase = len(snake_case_ ) + 1
feature_blocks.append((f'res{block_index}', v) )
_UpperCAmelCase = nn.ModuleDict(snake_case_ )
def lowercase ( self : Optional[int] , snake_case_ : Tensor ):
return get_trunk_forward_outputs(
snake_case_ , out_feat_keys=snake_case_ , feature_blocks=self._feature_blocks , )
class A_ ( lowerCAmelCase_ ):
def lowercase ( self : Any , snake_case_ : str ):
_UpperCAmelCase = x.split("-" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : Any , snake_case_ : str ):
# default to timm!
if x not in self:
_UpperCAmelCase = self.convert_name_to_timm(snake_case_ )
_UpperCAmelCase = partial(lambda: (timm.create_model(snake_case_ , pretrained=snake_case_ ).eval(), None) )
else:
_UpperCAmelCase = super().__getitem__(snake_case_ )
return val
class A_ ( lowerCAmelCase_ ):
def __getitem__( self : Tuple , snake_case_ : str ):
if "seer" in x and "in1k" not in x:
_UpperCAmelCase = RegNetModel
else:
_UpperCAmelCase = RegNetForImageClassification
return val
def UpperCAmelCase_ ( __lowercase : Optional[Any] , __lowercase : str , __lowercase : List[Tuple[str, str]] ) -> List[Any]:
'''simple docstring'''
for from_key, to_key in keys:
_UpperCAmelCase = from_state_dict[from_key].clone()
print(f'Copied key={from_key} to={to_key}' )
return to_state_dict
def UpperCAmelCase_ ( __lowercase : str , __lowercase : Callable[[], nn.Module] , __lowercase : Callable[[], nn.Module] , __lowercase : RegNetConfig , __lowercase : Path , __lowercase : bool = True , ) -> str:
'''simple docstring'''
print(f'Converting {name}...' )
with torch.no_grad():
_UpperCAmelCase , _UpperCAmelCase = from_model_func()
_UpperCAmelCase = our_model_func(__lowercase ).eval()
_UpperCAmelCase = ModuleTransfer(src=__lowercase , dest=__lowercase , raise_if_mismatch=__lowercase )
_UpperCAmelCase = torch.randn((1, 3, 224, 224) )
module_transfer(__lowercase )
if from_state_dict is not None:
_UpperCAmelCase = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
_UpperCAmelCase = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
_UpperCAmelCase = manually_copy_vissl_head(__lowercase , our_model.state_dict() , __lowercase )
our_model.load_state_dict(__lowercase )
_UpperCAmelCase = our_model(__lowercase , output_hidden_states=__lowercase )
_UpperCAmelCase = (
our_outputs.logits if isinstance(__lowercase , __lowercase ) else our_outputs.last_hidden_state
)
_UpperCAmelCase = from_model(__lowercase )
_UpperCAmelCase = from_output[-1] if type(__lowercase ) is list else from_output
# now since I don't want to use any config files, the vissl seer model doesn't actually have a head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
_UpperCAmelCase = our_outputs.hidden_states[-1]
assert torch.allclose(__lowercase , __lowercase ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=__lowercase , )
_UpperCAmelCase = 224 if "seer" not in name else 384
# we can use the convnext one
_UpperCAmelCase = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=__lowercase )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=__lowercase , )
print(f'Pushed {name}' )
def UpperCAmelCase_ ( __lowercase : Path , __lowercase : str = None , __lowercase : bool = True ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = "imagenet-1k-id2label.json"
_UpperCAmelCase = 1000
_UpperCAmelCase = (1, num_labels)
_UpperCAmelCase = "huggingface/label-files"
_UpperCAmelCase = num_labels
_UpperCAmelCase = json.load(open(cached_download(hf_hub_url(__lowercase , __lowercase , repo_type="dataset" ) ) , "r" ) )
_UpperCAmelCase = {int(k ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
_UpperCAmelCase = partial(__lowercase , num_labels=__lowercase , idalabel=__lowercase , labelaid=__lowercase )
_UpperCAmelCase = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
}
_UpperCAmelCase = NameToOurModelFuncMap()
_UpperCAmelCase = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(__lowercase : str , __lowercase : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
_UpperCAmelCase = torch.hub.load_state_dict_from_url(__lowercase , model_dir=str(__lowercase ) , map_location="cpu" )
_UpperCAmelCase = model_func()
# check if we have a head, if yes add it
_UpperCAmelCase = files["classy_state_dict"]["base_model"]["model"]
_UpperCAmelCase = model_state_dict["trunk"]
model.load_state_dict(__lowercase )
return model.eval(), model_state_dict["heads"]
# pretrained
_UpperCAmelCase = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
_UpperCAmelCase = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
_UpperCAmelCase = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
_UpperCAmelCase = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
_UpperCAmelCase = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
_UpperCAmelCase = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
_UpperCAmelCase = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
_UpperCAmelCase = partial(
__lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
__lowercase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , __lowercase , __lowercase , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
__lowercase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , __lowercase , __lowercase , __lowercase , )
return config, expected_shape
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architectures,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
__SCREAMING_SNAKE_CASE :Any = parser.parse_args()
__SCREAMING_SNAKE_CASE :Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
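# Example invocation (script filename and output directory are placeholders):
# convert one variant, or omit --model_name to convert every entry in the
# config table above.
#   python convert_regnet_to_pytorch.py --model_name regnet-y-040 \
#       --pytorch_dump_folder_path ./converted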
| 119
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
snake_case_ : Optional[int] = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[int] = ["""LayoutLMv2FeatureExtractor"""]
snake_case_ : Dict = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
snake_case_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 595
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self : Dict , lowercase : int , lowercase : Dict=7 , lowercase : Optional[int]=3 , lowercase : Any=18 , lowercase : Tuple=30 , lowercase : List[str]=4_00 , lowercase : Dict=True , lowercase : Dict=None , lowercase : List[str]=True , lowercase : List[str]=None , lowercase : Tuple=True , lowercase : List[str]=[0.5, 0.5, 0.5] , lowercase : Dict=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
UpperCAmelCase : Tuple = size if size is not None else {"shortest_edge": 18}
UpperCAmelCase : Union[str, Any] = crop_size if crop_size is not None else {"height": 18, "width": 18}
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[str] = batch_size
UpperCAmelCase : Optional[Any] = num_channels
UpperCAmelCase : Tuple = image_size
UpperCAmelCase : List[Any] = min_resolution
UpperCAmelCase : Optional[int] = max_resolution
UpperCAmelCase : List[Any] = do_resize
UpperCAmelCase : Dict = size
UpperCAmelCase : Tuple = do_center_crop
UpperCAmelCase : Optional[int] = crop_size
UpperCAmelCase : int = do_normalize
UpperCAmelCase : Tuple = image_mean
UpperCAmelCase : Tuple = image_std
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class snake_case__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = LevitImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase : Dict = LevitImageProcessingTester(self )
@property
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , "image_mean" ) )
self.assertTrue(hasattr(lowercase , "image_std" ) )
self.assertTrue(hasattr(lowercase , "do_normalize" ) )
self.assertTrue(hasattr(lowercase , "do_resize" ) )
self.assertTrue(hasattr(lowercase , "do_center_crop" ) )
self.assertTrue(hasattr(lowercase , "size" ) )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
UpperCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
pass
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
UpperCAmelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase : Any = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
UpperCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase : Optional[int] = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
UpperCAmelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase : Dict = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
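# Usage sketch outside the test harness (constructor kwargs mirror the test
# config above): the processor resizes the shortest edge to 18 px,
# center-crops to 18x18 and normalizes.
#   processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
#   pixel_values = processor(images, return_tensors="pt").pixel_values  # (N, 3, 18, 18)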
| 595
| 1
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class a__ :
def __init__( self , _A , _A=1_3 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=9_9 , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=1_6 , _A=2 , _A=0.02 , _A=3 , _A=4 , _A=None , _A=1_0_0_0 , ):
"""simple docstring"""
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = scope
__lowerCAmelCase = range_bbox
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__lowerCAmelCase = bbox[i, j, 3]
__lowerCAmelCase = bbox[i, j, 1]
__lowerCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__lowerCAmelCase = bbox[i, j, 2]
__lowerCAmelCase = bbox[i, j, 0]
__lowerCAmelCase = t
__lowerCAmelCase = tf.convert_to_tensor(_A )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFLayoutLMModel(config=_A )
__lowerCAmelCase = model(_A , _A , attention_mask=_A , token_type_ids=_A )
__lowerCAmelCase = model(_A , _A , token_type_ids=_A )
__lowerCAmelCase = model(_A , _A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFLayoutLMForMaskedLM(config=_A )
__lowerCAmelCase = model(_A , _A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = TFLayoutLMForSequenceClassification(config=_A )
__lowerCAmelCase = model(_A , _A , attention_mask=_A , token_type_ids=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = TFLayoutLMForTokenClassification(config=_A )
__lowerCAmelCase = model(_A , _A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFLayoutLMForQuestionAnswering(config=_A )
__lowerCAmelCase = model(_A , _A , attention_mask=_A , token_type_ids=_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) = config_and_inputs
__lowerCAmelCase = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class a__ ( snake_case__ , snake_case__ , unittest.TestCase ):
_a : Dict = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
_a : Dict = (
{
"""feature-extraction""": TFLayoutLMModel,
"""fill-mask""": TFLayoutLMForMaskedLM,
"""text-classification""": TFLayoutLMForSequenceClassification,
"""token-classification""": TFLayoutLMForTokenClassification,
"""zero-shot""": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
_a : Tuple = False
_a : Optional[int] = True
_a : str = 1_0
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFLayoutLMModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=_A , hidden_size=3_7 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = TFLayoutLMModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@unittest.skip("Onnx compliancy broke with TF 2.10" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def _a ( ):
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
__lowerCAmelCase = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] ) # noqa: E231
__lowerCAmelCase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
__lowerCAmelCase = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] ) # noqa: E231
__lowerCAmelCase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
__lowerCAmelCase = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class a__ ( unittest.TestCase ):
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = prepare_layoutlm_batch_inputs()
# forward pass
__lowerCAmelCase = model(input_ids=_A , bbox=_A , attention_mask=_A , token_type_ids=_A )
# test the sequence output on [0, :3, :3]
__lowerCAmelCase = tf.convert_to_tensor(
[[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1E-3 ) )
# test the pooled output on [1, :3]
__lowerCAmelCase = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _A , atol=1E-3 ) )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = prepare_layoutlm_batch_inputs()
# forward pass
__lowerCAmelCase = model(
input_ids=_A , bbox=_A , attention_mask=_A , token_type_ids=_A , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
__lowerCAmelCase = outputs.loss
__lowerCAmelCase = (2,)
self.assertEqual(loss.shape , _A )
# test the shape of the logits
__lowerCAmelCase = outputs.logits
__lowerCAmelCase = (2, 2)
self.assertEqual(logits.shape , _A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=1_3 )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = prepare_layoutlm_batch_inputs()
# forward pass
__lowerCAmelCase = model(
input_ids=_A , bbox=_A , attention_mask=_A , token_type_ids=_A , labels=_A )
# test the shape of the logits
__lowerCAmelCase = outputs.logits
__lowerCAmelCase = tf.convert_to_tensor((2, 2_5, 1_3) )
self.assertEqual(logits.shape , _A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = prepare_layoutlm_batch_inputs()
# forward pass
__lowerCAmelCase = model(input_ids=_A , bbox=_A , attention_mask=_A , token_type_ids=_A )
# test the shape of the logits
__lowerCAmelCase = tf.convert_to_tensor((2, 2_5) )
self.assertEqual(outputs.start_logits.shape , _A )
self.assertEqual(outputs.end_logits.shape , _A )
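# Forward-pass sketch (mirrors prepare_layoutlm_batch_inputs above): LayoutLM
# takes token ids plus per-token bounding boxes in 0-1000 normalized page
# coordinates.
#   model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
#   outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask)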
| 552
|
def _a ( SCREAMING_SNAKE_CASE_ : int ):
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
__lowerCAmelCase = 1
__lowerCAmelCase = 1
while repunit:
__lowerCAmelCase = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def _a ( SCREAMING_SNAKE_CASE_ : int = 1_00_00_00 ):
__lowerCAmelCase = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(SCREAMING_SNAKE_CASE_ ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(f'''{solution() = }''')
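# Worked example: the first helper computes, for a divisor coprime to 10, the
# smallest k such that the k-digit repunit R(k) = 111...1 is divisible by it,
# tracking R(k) % divisor incrementally. E.g. for 7 it returns 6, since
# 111111 = 7 * 15873. The driver then finds the first divisor whose k exceeds
# the limit (Project Euler 129 style).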
| 552
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_UpperCAmelCase : Dict = {
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 362
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_UpperCAmelCase : Dict = {
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class lowercase ( lowercase_ , lowercase_ ):
__SCREAMING_SNAKE_CASE : List[str] = '''resnet'''
__SCREAMING_SNAKE_CASE : Dict = ['''basic''', '''bottleneck''']
def __init__( self , snake_case=3 , snake_case=64 , snake_case=[256, 512, 1024, 2048] , snake_case=[3, 4, 6, 3] , snake_case="bottleneck" , snake_case="relu" , snake_case=False , snake_case=None , snake_case=None , **snake_case , ):
super().__init__(**snake_case )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
snake_case_ = num_channels
snake_case_ = embedding_size
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = layer_type
snake_case_ = hidden_act
snake_case_ = downsample_in_first_stage
snake_case_ = ['stem'] + [F'''stage{idx}''' for idx in range(1 , len(snake_case ) + 1 )]
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(
out_features=snake_case , out_indices=snake_case , stage_names=self.stage_names )
class lowercase ( lowercase_ ):
__SCREAMING_SNAKE_CASE : List[Any] = version.parse('''1.11''' )
@property
def a ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def a ( self ):
return 1e-3
| 362
| 1
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
def a__ ( self ):
_A= AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
_A= AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(lowerCAmelCase__ )
_A= -1
_A= ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase__ )
_A= model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ )
_A= tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_A= TextStreamer(lowerCAmelCase__ )
model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_A= cs.out[:-1]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def a__ ( self ):
_A= AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
_A= AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(lowerCAmelCase__ )
_A= -1
_A= ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase__ )
_A= model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ )
_A= tokenizer.decode(greedy_ids[0] )
_A= TextIteratorStreamer(lowerCAmelCase__ )
_A= {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
_A= Thread(target=model.generate , kwargs=lowerCAmelCase__ )
thread.start()
_A= ''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def a__ ( self ):
_A= AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
_A= AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(lowerCAmelCase__ )
_A= -1
_A= ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase__ )
_A= model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ )
_A= greedy_ids[:, input_ids.shape[1] :]
_A= tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_A= TextStreamer(lowerCAmelCase__ , skip_prompt=lowerCAmelCase__ )
model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_A= cs.out[:-1]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def a__ ( self ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_A= AutoTokenizer.from_pretrained('distilgpt2' )
_A= AutoModelForCausalLM.from_pretrained('distilgpt2' ).to(lowerCAmelCase__ )
_A= -1
_A= torch.ones((1, 5) , device=lowerCAmelCase__ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_A= TextStreamer(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
model.generate(lowerCAmelCase__ , max_new_tokens=1 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_A= cs.out[:-1] # Remove the final "\n"
_A= tokenizer(lowerCAmelCase__ , return_tensors='pt' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def a__ ( self ):
_A= AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
_A= AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(lowerCAmelCase__ )
_A= -1
_A= ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase__ )
_A= TextIteratorStreamer(lowerCAmelCase__ , timeout=0.001 )
_A= {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
_A= Thread(target=model.generate , kwargs=lowerCAmelCase__ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowerCAmelCase__ ):
_A= ''
for new_text in streamer:
streamer_text += new_text
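# Minimal streaming sketch outside the test harness: generation runs in a
# background thread while the main thread consumes decoded text chunks.
#   streamer = TextIteratorStreamer(tokenizer)
#   Thread(target=model.generate, kwargs={"input_ids": input_ids,
#          "max_new_tokens": 20, "streamer": streamer}).start()
#   text = "".join(chunk for chunk in streamer)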
| 476
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")
    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    # We need a different implementation of the test of the same name defined in the common tests because this
    # tokenizer doesn't have a vocab
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
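

# --- illustration added by the editor: ByT5 ids are just UTF-8 bytes shifted by the
# number of leading special tokens (pad=0, eos=1, unk=2), which is why "U" (byte 85)
# encodes to 88 in the expectations above. A minimal sketch, independent of the tokenizer:
if __name__ == "__main__":
    def byte_ids(text: str, offset: int = 3) -> list:
        # UTF-8 bytes, each shifted past the special-token ids
        return [b + offset for b in text.encode("utf-8")]

    # matches the expected ids in test_multibytes_char (the eos id 1 is appended there)
    assert byte_ids("Unicode €.") == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49]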
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
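

# --- illustration added by the editor: minimal reference implementations inferred from
# the expectations above. These are sketches, not the actual `utils_summarization`
# module (they reuse the `torch` import at the top of this file).
def truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    """Truncate to `block_size`, or right-pad with `pad_token_id` up to `block_size`."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))


def build_mask_sketch(sequence, pad_token_id):
    """1 for real tokens, 0 from the first padding token onward (a simplification)."""
    mask = torch.ones_like(sequence)
    padding = (sequence == pad_token_id).nonzero()
    if padding.numel() > 0:
        mask[padding.min() :] = 0
    return mask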
"""simple docstring"""
def A ( snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 2
while i * i <= n:
SCREAMING_SNAKE_CASE__ = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def A ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 1
while True:
i += 1
t_num += i
if count_divisors(snake_case__ ) > 5_00:
break
return t_num
if __name__ == "__main__":
print(solution())
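

# --- illustration added by the editor: the divisor count follows from the prime
# factorization, d(n) = prod(e_i + 1) over the prime exponents e_i. For example,
# 28 = 2^2 * 7, so d(28) = (2 + 1) * (1 + 1) = 6: divisors {1, 2, 4, 7, 14, 28}.
if __name__ == "__main__":
    assert count_divisors(28) == 6  # 2^2 * 7 -> (2 + 1) * (1 + 1)
    assert count_divisors(1) == 1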
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search over a graph given as an adjacency dict."""
    explored, stack = set(start), [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
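

# --- illustration added by the editor: the same traversal written recursively, as a
# cross-check sketch. On the graph G above, both variants explore every vertex
# reachable from "A" (here, all of "A".."G").
def depth_first_search_recursive(graph: dict, start: str, explored: set | None = None) -> set:
    explored = explored if explored is not None else set()
    explored.add(start)
    for adj in graph[start]:
        if adj not in explored:
            depth_first_search_recursive(graph, adj, explored)
    return explored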
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 1_4],
[3, 4, 9],
[5, 4, 1_0],
[1, 7, 1_1],
]
    adjacency = defaultdict(list)
    for nodea, nodeb, cost in edges:
        adjacency[nodea].append([nodeb, cost])
        adjacency[nodeb].append([nodea, cost])

    result = mst(adjacency)

    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
assert edge in result or reverse in result
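

# --- illustration added by the editor: a compact heap-based Prim's algorithm that can
# serve as an independent cross-check of `mst` on the same adjacency structure. This
# is a sketch, not the implementation under test.
def prim_total_weight(adjacency: dict, start) -> int:
    import heapq

    visited = {start}
    heap = [(cost, start, to) for to, cost in adjacency[start]]
    heapq.heapify(heap)
    total = 0
    while heap:
        cost, _, to = heapq.heappop(heap)
        if to in visited:
            continue
        visited.add(to)
        total += cost
        for nxt, c in adjacency[to]:
            if nxt not in visited:
                heapq.heappush(heap, (c, to, nxt))
    return total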
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]
    def __init__(
        self,
        feature_size: int = 1, sampling_rate: int = 16000, padding_value: float = 0.0,
        do_normalize: bool = False, num_mel_bins: int = 80, hop_length: int = 16,
        win_length: int = 64, win_function: str = "hann_window", frame_signal_scale: float = 1.0,
        fmin: float = 80, fmax: float = 7600, mel_floor: float = 1e-10,
        reduction_factor: int = 2, return_attention_mask: bool = True, **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin,
            max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm="slaney", mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extracts log-mel filterbank features for one waveform array (unbatched)."""
        log_mel_spec = spectrogram(
            one_waveform, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride,
            fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(
        self,
        audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask,
                return_tensors, **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask,
                return_tensors, **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def _process_audio(
        self,
        speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        is_target: bool = False,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
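

# --- illustration added by the editor: a minimal usage sketch of the feature extractor
# on synthetic audio. The waveform is random noise; shapes are the point here, and the
# exact frame count depends on the hop/window settings above.
if __name__ == "__main__":
    extractor = SpeechT5FeatureExtractor()
    waveform = np.random.randn(16000).astype(np.float32)  # one second at 16 kHz
    inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
    targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")
    print(inputs["input_values"].shape)   # (1, 16000): raw values for the encoder
    print(targets["input_values"].shape)  # (1, num_frames, 80): log-mel target features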
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880, hidden_size=64, n_layer=2, n_head=8, layer_norm_epsilon=1e-5,
        initializer_range=0.02, use_cache=True, bos_token_id=1, eos_token_id=2,
        apply_residual_connection_post_layernorm=False, hidden_dropout=0.0, attention_dropout=0.0,
        pretraining_tp=1, slow_but_exact=False, **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs
    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
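

# --- illustration added by the editor: a small sketch instantiating the config and its
# ONNX wrapper; `num_hidden_layers` reads through to `n_layer` via the attribute_map.
if __name__ == "__main__":
    config = BloomConfig(n_layer=4, n_head=8, hidden_size=64)
    assert config.num_hidden_layers == 4  # resolved through attribute_map
    onnx_config = BloomOnnxConfig(config)
    print(onnx_config.num_layers, onnx_config.num_attention_heads, onnx_config.default_onnx_opset)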
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1,
        attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1,
        initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group",
        feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False,
        num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, num_buckets=320,
        max_bucket_distance=800, do_stable_layer_norm=False, apply_spec_augment=True,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0,
        mask_feature_length=10, num_codevectors_per_group=320, num_codevector_groups=2,
        contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256,
        proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean",
        ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2,
        add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3,
        output_hidden_size=None, **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
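

# --- illustration added by the editor: `inputs_to_logits_ratio` is the product of the
# feature-encoder strides, i.e. how many input samples map to one output frame.
if __name__ == "__main__":
    config = WavLMConfig()
    # (5, 2, 2, 2, 2, 2, 2) multiplies out to 320 input samples per frame
    assert config.inputs_to_logits_ratio == 320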
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True,
        use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values
    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
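

# --- illustration added by the editor: the sequence length used throughout the tester
# is the patch count plus one [CLS] token. A quick arithmetic sketch for the defaults
# above (image_size=30, patch_size=2):
if __name__ == "__main__":
    image_size, patch_size = 30, 2
    num_patches = (image_size // patch_size) ** 2  # 15 * 15 = 225
    print(num_patches + 1)  # 226 tokens, including [CLS]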
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True,
        size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5],
        do_rescale=True, rescale_factor=1 / 255, do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the height and width the image processor is expected to produce."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 407
| 0
|
def catalan_numbers(upper_limit: int) -> list[int]:
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
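# Quick self-check (added for illustration): the first six Catalan numbers
# form the well-known sequence 1, 1, 2, 5, 14, 42.
assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]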
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
            N = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
| 715
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_resnet'] = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_resnet'] = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_resnet'] = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 185
| 0
|
'''simple docstring'''
ENERGY_CONVERSION: dict[str, float] = {
"joule": 1.0,
"kilojoule": 10_00,
"megajoule": 1_00_00_00,
"gigajoule": 10_00_00_00_00,
"wattsecond": 1.0,
"watthour": 36_00,
"kilowatthour": 3_60_00_00,
"newtonmeter": 1.0,
"calorie_nutr": 41_86.8,
"kilocalorie_nutr": 4_18_68_00.00,
"electronvolt": 1.6_02_17_66_34e-19,
"britishthermalunit_it": 10_55.0_55_85,
"footpound": 1.35_5818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert `value` between any two energy units listed in ENERGY_CONVERSION."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
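# Sanity checks (added for illustration; the values follow directly from the
# conversion table above): 1 kWh = 3_600_000 J, and 1 J = 0.001 kJ.
assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0
assert energy_conversion("joule", "kilojoule", 1) == 0.001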
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72
|
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
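# Agreement check (added): both implementations return 5.0 for the classic
# 3-4-5 right triangle.
assert euclidean_distance((0, 0), (3, 4)) == euclidean_distance_no_np((0, 0), (3, 4)) == 5.0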
if __name__ == "__main__":
    def benchmark() -> None:
from timeit import timeit
print("Without Numpy" )
print(
timeit(
"euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=1_0_0_0_0 , globals=globals() , ) )
print("With Numpy" )
print(
timeit(
"euclidean_distance([1, 2, 3], [4, 5, 6])" , number=1_0_0_0_0 , globals=globals() , ) )
benchmark()
| 140
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709
|
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)
    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)
    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPTaConfig)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPTaLMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPTaLMHeadModel)
    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)
    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)
    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TaConfig)

            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTaForConditionalGeneration)

            model = AutoModelForSeqaSeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TaForConditionalGeneration)
    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)
    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
| 486
| 0
|
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."
if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
raise ValueError(F'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
if all_paths != sorted(all_paths):
raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 684
|
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
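# Worked example (added): the system x + 2y = 3, 2x + 5y = 6 has the unique
# solution x = 3, y = 0, since det = 1, det_x = 3 and det_y = 0.
assert cramers_rule_2x2([1, 2, 3], [2, 5, 6]) == (3.0, 0.0)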
| 684
| 1
|
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Saves a randomly initialized model (and its tokenizer) built from `config_name`."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeqaSeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
return model
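# Example invocation via the fire CLI below (illustrative; the script file name
# and both arguments are placeholders, not part of the original module):
#   python save_randomly_initialized_model.py t5-small ./t5-random-small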
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 701
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""",
"""facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250_880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10_240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 525
| 0
|
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n    only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n    >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n    >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n    >>> print(results)\n    {'spearmanr': -0.7}\n\n    Example 2:\n    >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n    >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n    ...                                    predictions=[10, 9, 2.5, 6, 4],\n    ...                                    return_pvalue=True)\n    >>> print(results['spearmanr'])\n    -0.7\n    >>> print(round(results['spearmanr_pvalue'], 2))\n    0.19\n"
_CITATION = R"\\n@book{kokoska2000crc,\n  title={CRC standard probability and statistics tables and formulae},\n  author={Kokoska, Stephen and Zwillinger, Daniel},\n  year={2000},\n  publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n             Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n             Kern, Robert and Larson, Eric and Carey, C J and\n             Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n             Harris, Charles R. and Archibald, Anne M. and\n             Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n             Computing in Python}},\n  journal = {Nature Methods},\n  year    = {2020},\n  volume  = {17},\n  pages   = {261--272},\n  adsurl  = {https://rdcu.be/b08Wh},\n  doi     = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 632
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 632
| 1
|
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        'microsoft/swin-tiny-patch4-window7-224', out_features=['stage1', 'stage2', 'stage3', 'stage4']
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = 'huggingface/label-files'
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = 'maskformer-ade20k-full-id2label.json'
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = 'ade20k-id2label.json'
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = 'maskformer-coco-stuff-id2label.json'
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = 'coco-panoptic-id2label.json'
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = 'cityscapes-id2label.json'
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = 'mapillary-vistas-id2label.json'

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.layers.{i}.downsample.reduction.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.layers.{i}.downsample.norm.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.layers.{i}.downsample.norm.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((f"sem_seg_head.adapter_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") )
rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") )
rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") )
rename_keys.append((f"sem_seg_head.layer_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") )
rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") )
rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") )
# cross-attention out projection
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") )
# MLP 1
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", f"model.transformer_module.decoder.layers.{idx}.fc1.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", f"model.transformer_module.decoder.layers.{idx}.fc1.bias") )
# MLP 2
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", f"model.transformer_module.decoder.layers.{idx}.fc2.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", f"model.transformer_module.decoder.layers.{idx}.fc2.bias") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") )
# layernorm 3 (final layernorm)
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.weight", f"mask_embedder.{i}.0.weight") )
rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.bias", f"mask_embedder.{i}.0.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, 'rb') as f:
        data = pickle.load(f)
    state_dict = data['model']

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if 'ade' in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors='pt')

    outputs = model(**inputs)

    print('Logits:', outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
    assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print('Pushing model and image processor to the hub...')
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
        help="Name of the MaskFormer model you'd like to convert",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 508
|
'''simple docstring'''
class TrieNode:
    def __init__(self):
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]):
        for word in words:
            self.insert(word)

    def insert(self, word: str):
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str):
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
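# Minimal usage check (added for illustration): insert one word and confirm
# that only the full word is found, not its prefix.
_demo = TrieNode()
_demo.insert("cat")
assert _demo.find("cat") and not _demo.find("ca")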
def print_words(node: TrieNode, word: str):
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
assert root.find('banana' )
assert not root.find('bandanas' )
assert not root.find('apps' )
assert root.find('apple' )
assert root.find('all' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")
def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
| 508
| 1
|
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."""
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
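# Usage sketch (illustrative; `pt_model` and `flax_model` are placeholders for
# a matched PyTorch/Flax model pair, not part of this module):
#   flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)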
| 186
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)
BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250_880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
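# Example usage (illustrative sketch, kept as comments): a tiny BloomConfig and
# its ONNX export config; the sizes below are arbitrary toy values.
#
#     config = BloomConfig(vocab_size=1000, hidden_size=64, n_layer=2, n_head=8)
#     onnx_config = BloomOnnxConfig(config, task="default", use_past=True)
#     print(onnx_config.inputs)          # OrderedDict of dynamic-axis specs
#     print(onnx_config.num_layers)      # 2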
| 290
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__A : str = logging.get_logger(__name__)
__A : str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
__A : str = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
__A : Union[str, Any] = {
'roberta-base': 512,
'roberta-large': 512,
'roberta-large-mnli': 512,
'distilroberta-base': 512,
'roberta-base-openai-detector': 512,
'roberta-large-openai-detector': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace",
                 bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>",
                 pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
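# Example usage (illustrative sketch, kept as comments):
#
#     tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
#     enc = tokenizer("Hello world", return_offsets_mapping=True)
#     print(enc["input_ids"], enc["offset_mapping"])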
| 267
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention_forwardGenerator_pass = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
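# Example usage (illustrative sketch, kept as comments) for the real pipeline
# this test class exercises; the checkpoint name is the public Kandinsky 2.2
# prior repo.
#
#     pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
#     image_embeds, negative_image_embeds = pipe_prior("a photo of a cat", guidance_scale=4.0).to_tuple()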
| 267
| 1
|
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
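# Example usage (illustrative sketch, kept as comments; assumes a PIL image of
# a document page on disk):
#
#     from PIL import Image
#
#     tool = DocumentQuestionAnsweringTool()
#     tool.setup()
#     answer = tool(document=Image.open("invoice.png"), question="What is the total amount?")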
| 313
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            # split the fused qkv projection into separate query/key/value entries
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A__ : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A__ : List[Any] =parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 207
| 0
|
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True    # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89


def chain(number: int) -> bool:
    """Return True if the chain starting at ``number`` arrives at 1, False if it arrives at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    """Count the starting numbers below ``number`` whose chains arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
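# Worked example of the square-digit chain (illustrative):
# 44 -> 4**2 + 4**2 = 32 -> 9 + 4 = 13 -> 1 + 9 = 10 -> 1, so 44 arrives at 1;
# 85 -> 64 + 25 = 89, so 85 arrives at 89 and is counted by solution().
#
#     assert next_number(44) == 32
#     assert next_number(85) == 89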
| 377
|
'''simple docstring'''
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            arr = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])


def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : str ):
'''simple docstring'''
_a = pa.BufferOutputStream()
_a = pa.schema(UpperCamelCase ) if fields else None
with ArrowWriter(stream=UpperCamelCase , schema=UpperCamelCase , writer_batch_size=UpperCamelCase ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def snake_case_ ():
'''simple docstring'''
_a = pa.BufferOutputStream()
_a = Features({'''labels''': ClassLabel(names=['''neg''', '''pos'''] )} )
with ArrowWriter(stream=UpperCamelCase , features=UpperCamelCase ) as writer:
writer.write({'''labels''': 0} )
writer.write({'''labels''': 1} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_a = pa.BufferReader(output.getvalue() )
_a = pa.ipc.open_stream(UpperCamelCase )
_a = f.read_all()
_a = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(UpperCamelCase )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
def snake_case_ (UpperCamelCase : Any ):
'''simple docstring'''
_a = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase , writer_batch_size=UpperCamelCase , hash_salt='''split_name''' , check_duplicates=UpperCamelCase , ) as writer:
with pytest.raises(UpperCamelCase ):
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=[1, 2] )
_a , _a = writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''' , [None, 2, 10] )
def snake_case_ (UpperCamelCase : Any ):
'''simple docstring'''
_a = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase , writer_batch_size=UpperCamelCase , hash_salt='''split_name''' , check_duplicates=UpperCamelCase , ) as writer:
with pytest.raises(UpperCamelCase ):
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=10 )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} , key=10 )
_a , _a = writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''' , [None, 2, 10] )
def snake_case_ (UpperCamelCase : int ):
'''simple docstring'''
_a = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase , writer_batch_size=UpperCamelCase , hash_salt='''split_name''' , check_duplicates=UpperCamelCase , ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=1 )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} , key=2 )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : List[str] ):
'''simple docstring'''
_a = pa.BufferOutputStream()
_a = pa.schema(UpperCamelCase ) if fields else None
with ArrowWriter(stream=UpperCamelCase , schema=UpperCamelCase , writer_batch_size=UpperCamelCase ) as writer:
writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} )
writer.write_batch({'''col_1''': [], '''col_2''': []} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def snake_case_ (UpperCamelCase : Any , UpperCamelCase : Any ):
'''simple docstring'''
_a = pa.BufferOutputStream()
_a = pa.schema(UpperCamelCase ) if fields else None
with ArrowWriter(stream=UpperCamelCase , schema=UpperCamelCase , writer_batch_size=UpperCamelCase ) as writer:
writer.write_table(pa.Table.from_pydict({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} ) )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def snake_case_ (UpperCamelCase : int , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_a = pa.BufferOutputStream()
_a = pa.schema(UpperCamelCase ) if fields else None
with ArrowWriter(stream=UpperCamelCase , schema=UpperCamelCase , writer_batch_size=UpperCamelCase ) as writer:
writer.write_row(pa.Table.from_pydict({'''col_1''': ['''foo'''], '''col_2''': [1]} ) )
writer.write_row(pa.Table.from_pydict({'''col_1''': ['''bar'''], '''col_2''': [2]} ) )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def snake_case_ ():
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
_a = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
_a = os.path.join(UpperCamelCase , '''test.arrow''' )
with ArrowWriter(path=UpperCamelCase , schema=pa.schema(UpperCamelCase ) ) as writer:
writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(UpperCamelCase , metadata=writer._schema.metadata )
_check_output(UpperCamelCase , 1 )
def snake_case_ (UpperCamelCase : List[str] ):
'''simple docstring'''
if pa.types.is_list(UpperCamelCase ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def snake_case_ (UpperCamelCase : Optional[int] , UpperCamelCase : Any ):
'''simple docstring'''
if isinstance(lst[0] , UpperCamelCase ):
change_first_primitive_element_in_list(lst[0] , UpperCamelCase )
else:
_a = value
@pytest.mark.parametrize('''optimized_int_type, expected_dtype''' , [(None, pa.intaa()), (Value('''int32''' ), pa.intaa())] )
@pytest.mark.parametrize('''sequence''' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def snake_case_ (UpperCamelCase : int , UpperCamelCase : Tuple , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_a = pa.array(TypedSequence(UpperCamelCase , optimized_int_type=UpperCamelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
'''col, expected_dtype''' , [
('''attention_mask''', pa.inta()),
('''special_tokens_mask''', pa.inta()),
('''token_type_ids''', pa.inta()),
('''input_ids''', pa.intaa()),
('''other''', pa.intaa()),
] , )
@pytest.mark.parametrize('''sequence''' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] ):
'''simple docstring'''
_a = pa.array(OptimizedTypedSequence(UpperCamelCase , col=UpperCamelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_a = copy.deepcopy(UpperCamelCase )
_a = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(UpperCamelCase , UpperCamelCase )
_a = pa.array(OptimizedTypedSequence(UpperCamelCase , col=UpperCamelCase ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize('''raise_exception''' , [False, True] )
def snake_case_ (UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
_a = str(tmp_path / '''dataset-train.arrow''' )
try:
with ArrowWriter(path=UpperCamelCase ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def snake_case_ (UpperCamelCase : List[Any] ):
'''simple docstring'''
_a = '''mock://dataset-train.arrow'''
with ArrowWriter(path=UpperCamelCase , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(UpperCamelCase ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(UpperCamelCase )
def snake_case_ ():
'''simple docstring'''
_a = pa.BufferOutputStream()
with ParquetWriter(stream=UpperCamelCase ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_a = pa.BufferReader(output.getvalue() )
_a = pq.read_table(UpperCamelCase )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('''embed_local_files''' , [False, True] )
def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : List[Any] ):
'''simple docstring'''
import PIL.Image
_a = str(tmp_path / '''test_image_rgb.jpg''' )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(UpperCamelCase , format='''png''' )
_a = pa.BufferOutputStream()
with ParquetWriter(
stream=UpperCamelCase , features=Features({'''image''': Image()} ) , embed_local_files=UpperCamelCase ) as writer:
writer.write({'''image''': image_path} )
writer.finalize()
_a = pa.BufferReader(output.getvalue() )
_a = pq.read_table(UpperCamelCase )
_a = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out['''image'''][0]['''path'''] , UpperCamelCase )
with open(UpperCamelCase , '''rb''' ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def snake_case_ ():
'''simple docstring'''
_a = pa.schema([pa.field('''col_1''' , pa.string() , nullable=UpperCamelCase )] )
_a = pa.BufferOutputStream()
with ArrowWriter(stream=UpperCamelCase ) as writer:
writer._build_writer(inferred_schema=UpperCamelCase )
assert writer._schema == pa.schema([pa.field('''col_1''' , pa.string() )] )
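# Minimal ArrowWriter usage (illustrative sketch of the API exercised by the
# tests above, kept as comments):
#
#     output = pa.BufferOutputStream()
#     with ArrowWriter(stream=output) as writer:
#         writer.write({"col_1": "foo", "col_2": 1})
#         writer.write({"col_1": "bar", "col_2": 2})
#         num_examples, num_bytes = writer.finalize()
#     _check_output(output.getvalue(), expected_num_chunks=1)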
| 377
| 1
|
'''simple docstring'''
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with 5 random witnesses."""
    s = num - 1
    t = 0

    while s % 2 == 0:
        s = s // 2
        t += 1

    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
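# Worked decomposition (illustrative): for num = 561, num - 1 = 560 = 2**4 * 35,
# so the loop above leaves s = 35 and t = 4 before the witness loop runs.
# 561 = 3 * 11 * 17 is a Carmichael number, which rabin_miller rejects with
# overwhelming probability even though it fools the plain Fermat test.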
def is_prime_low_num(num: int) -> bool:
    """Check small primes and small prime factors first, then fall back to Rabin-Miller."""
    if num < 2:
        return False

    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(UpperCamelCase_ )
def generate_large_prime(keysize: int = 1024) -> int:
    """Generate a random prime with roughly ``keysize`` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
| 48
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
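# Example usage (illustrative sketch, kept as comments), mirroring the slow
# test above:
#
#     unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#     pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
#     image = pipe(num_inference_steps=50, output_type="pil").images[0]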
| 210
| 0
|
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
a__ : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
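# Example usage (illustrative sketch, kept as comments) via the high-level
# pipeline factory with a public OWL-ViT checkpoint:
#
#     from transformers import pipeline
#
#     detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#     detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote control"],
#     )
#     # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]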
| 553
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
])
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 553
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False) -> None:
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_img() -> Image.Image:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
) -> None:
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--yolos_name""",
default="""yolos_s_200_pre""",
type=str,
help=(
"""Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
""" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
),
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
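# Hedged usage sketch (not part of the original script; the local path below is a
# placeholder): once a checkpoint has been converted and saved, it can be reloaded
# for inference with the standard transformers API.
#
#   processor = YolosImageProcessor.from_pretrained("path/to/pytorch_dump_folder")
#   model = YolosForObjectDetection.from_pretrained("path/to/pytorch_dump_folder")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   outputs = model(**inputs)  # outputs.logits / outputs.pred_boxes hold the detections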
| 93
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
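# Minimal usage sketch (VisualBertModel is the companion model class in transformers;
# instantiating it here with a fresh config gives randomly initialized weights).
#
#   from transformers import VisualBertModel
#
#   configuration = VisualBertConfig(visual_embedding_dim=512)
#   model = VisualBertModel(configuration)
#   print(model.config.visual_embedding_dim)  # 512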
| 293
| 0
|
def solution(n: int = 4_000_000) -> int:
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F"""{solution() = }""")
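# Hedged alternative sketch (the function name is illustrative): every third Fibonacci
# number is even, and the even ones satisfy e_k = 4*e_{k-1} + e_{k-2}, so the sum can
# be accumulated without storing a list.
def solution_no_list(n: int = 4_000_000) -> int:
    total = 0
    e_prev, e_curr = 0, 2
    while e_curr <= n:
        total += e_curr
        e_prev, e_curr = e_curr, 4 * e_curr + e_prev
    return total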
| 326
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
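# Hedged wiring sketch (output_dir/metric/patience values are placeholders): these
# callbacks plug into a pytorch_lightning Trainer alongside the LightningModule being
# trained.
#
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback(output_dir="outputs", metric="rouge2"),
#           get_early_stopping_callback(metric="rouge2", patience=3),
#       ],
#   )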
| 326
| 1
|
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 15
|
def merge_sort(collection):
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=",")
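# Hedged sanity-check sketch: the routine repeatedly strips the current minimum and
# maximum into `start` and `end`, so it should agree with Python's built-in sorted()
# on any list of numbers. (Note it mutates its argument in place.)
#
#   assert merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
#   assert merge_sort([]) == []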
| 441
| 0
|
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
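# Hedged sanity-check sketch: `sort` (introsort) should agree with Python's built-in
# sorted() on arbitrary float input; the sample values below are random and illustrative.
#
#   import random
#   sample = [random.uniform(-100, 100) for _ in range(1000)]
#   assert sort(sample.copy()) == sorted(sample)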
| 715
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 294
| 0
|
'''simple docstring'''
g = 9.80665


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")

    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
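    # Hedged worked example (values are illustrative): a fully submerged 0.5 m^3 object
    # in fresh water (density ~1000 kg/m^3) displaces 0.5 * 1000 * 9.80665 ≈ 4903.33 N.
    buoyant_force = archimedes_principle(fluid_density=1000, volume=0.5)
    print(f"Buoyant force: {buoyant_force:.2f} N")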
| 414
|
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F"{k}: {v['duration']}")
| 414
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_falcon'] = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
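# Illustrative sketch (assumes a transformers release that ships Falcon): thanks to
# _LazyModule, `import transformers` stays cheap, and the torch-backed classes listed
# above are only materialized on first attribute access.
#
#   import transformers
#
#   config = transformers.FalconConfig()  # triggers the lazy import of configuration_falcon
#   print(type(config).__name__)          # "FalconConfig"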
| 409
|
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    # Set up the random number generator.
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
from doctest import testmod
testmod()
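    # Hedged usage sketch: with a fixed simulator seed the sifted key is reproducible,
    # so repeated runs can be compared directly (requires qiskit to be installed).
    #
    #   key_a = bb84(key_len=8, seed=0)
    #   key_b = bb84(key_len=8, seed=0)
    #   assert key_a == key_b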
| 409
| 1
|
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).

    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> None:
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
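# Minimal usage sketch of the pipeline defined above (the model name is illustrative;
# distilroberta-base uses `<mask>` as its mask token):
#
#   from transformers import pipeline
#
#   unmasker = pipeline("fill-mask", model="distilroberta-base")
#   for prediction in unmasker("Paris is the <mask> of France.", top_k=3):
#       print(prediction["token_str"], round(prediction["score"], 4))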
| 195
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '''▁'''

VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/mbart-large-50-one-to-many-mmt''': 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI''']
class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
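# Hedged usage sketch: loading the published checkpoint and preparing a translation
# input; the resulting input_ids begin with the src_lang code and end with </s>.
#
#   from transformers import MBart50Tokenizer
#
#   tokenizer = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   model_inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")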
| 330
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 414
|
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )

    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop ~60% of the no-answer samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
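    # Hedged read-back sketch: each line of the saved jsonl is one training sample
    # (the file name matches the validation output written above).
    #
    #   import jsonlines
    #
    #   with jsonlines.open("nq-validation.jsonl") as reader:
    #       first = next(iter(reader))
    #       print(first["category"], len(first["input_ids"]))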
| 414
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class a ( __snake_case ):
SCREAMING_SNAKE_CASE : Union[PIL.Image.Image, np.ndarray]
class a ( __snake_case ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : PriorTransformer , __SCREAMING_SNAKE_CASE : CLIPVisionModel , __SCREAMING_SNAKE_CASE : CLIPImageProcessor , __SCREAMING_SNAKE_CASE : HeunDiscreteScheduler , __SCREAMING_SNAKE_CASE : ShapERenderer , ) -> Union[str, Any]:
super().__init__()
self.register_modules(
prior=__SCREAMING_SNAKE_CASE , image_encoder=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , renderer=__SCREAMING_SNAKE_CASE , )
def UpperCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> int:
if latents is None:
lowerCamelCase_ = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
lowerCamelCase_ = latents.to(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = latents * scheduler.init_noise_sigma
return latents
def UpperCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : int=0 ) -> Optional[int]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCamelCase_ = torch.device(F'''cuda:{gpu_id}''' )
lowerCamelCase_ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@property
def UpperCamelCase ( self : Dict ) -> Any:
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__SCREAMING_SNAKE_CASE , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors='pt').pixel_values[0].unsqueeze(0)
        image = image.to(dtype=self.image_encoder.dtype, device=device)
        image_embeds = self.image_encoder(image)['last_hidden_state']
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, 256, dim
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])
        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image: Union[PIL.Image.Image, List[PIL.Image.Image]], num_images_per_prompt: int = 1, num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, guidance_scale: float = 4.0, frame_size: int = 64, output_type: Optional[str] = "pil", return_dict: bool = True, ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}''')
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler, )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds, ).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2)  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred, timestep=t, sample=latents, ).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)
        images = []
        for i, latent in enumerate(latents):
            images.append(
                self.renderer.decode(
                    latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128, ))
        images = torch.stack(images)
        if output_type not in ["np", "pil"]:
            raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''')
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]
        # Offload last model to CPU
        if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images)
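# A minimal, self-contained sketch of the classifier-free guidance step used inside the
# denoising loop above: the batch stacks the unconditional and conditional predictions
# along dim 0, and the final prediction extrapolates from the unconditional one toward
# the conditional one. The tensor shapes here are illustrative assumptions only.
if __name__ == "__main__":
    guidance_scale = 3.0
    noise_pred = torch.randn(2, 1024, 1024)  # [uncond, cond] stacked along the batch dim
    noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
    guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
    assert guided.shape == (1, 1024, 1024)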
| 549
|
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class a ( unittest.TestCase ):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task='zero-shot-audio-classification', model='hf-internal-testing/tiny-clap-htsat-unfused')
        dataset = load_dataset('ashraq/esc50')
        audio = dataset['train']['audio'][-1]['array']
        output = audio_classifier(audio, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
        self.assertEqual(
            nested_simplify(output), [{'score': 0.501, 'label': 'Sound of a dog'}, {'score': 0.499, 'label': 'Sound of vaccum cleaner'}], )
    @unittest.skip('No models are available in TF')
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task='zero-shot-audio-classification', model='laion/clap-htsat-unfused', )
        # This is an audio of a dog
        dataset = load_dataset('ashraq/esc50')
        audio = dataset['train']['audio'][-1]['array']
        output = audio_classifier(audio, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
        self.assertEqual(
            nested_simplify(output), [
                {'score': 0.999, 'label': 'Sound of a dog'},
                {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
            ], )
        output = audio_classifier([audio] * 5, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
        self.assertEqual(
            nested_simplify(output), [
                [
                    {'score': 0.999, 'label': 'Sound of a dog'},
                    {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
                ],
            ]
            * 5, )
        output = audio_classifier(
            [audio] * 5, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'], batch_size=5)
        self.assertEqual(
            nested_simplify(output), [
                [
                    {'score': 0.999, 'label': 'Sound of a dog'},
                    {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
                ],
            ]
            * 5, )
    @unittest.skip('No models are available in TF')
    def test_large_model_tf(self):
        pass
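# A minimal usage sketch of the zero-shot audio-classification pipeline exercised by the
# tests above; the model id, file name, and labels are illustrative assumptions.
if __name__ == "__main__":
    classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
    predictions = classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of a vacuum cleaner"])
    print(predictions)  # list of {"score": ..., "label": ...} entries sorted by score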
| 549
| 1
|
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
def build_tree() -> TreeNode:
    print('''\n********Press N to stop entering at any point of time********\n''')
    check = input('''Enter the value of the root node: ''').strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = F'''Enter the left node of {node_found.data}: '''
        check = input(msg).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = F'''Enter the right node of {node_found.data}: '''
        check = input(msg).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError("unreachable: the loop above always returns before the queue empties")
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=''',''')
    pre_order(node.left)
    pre_order(node.right)
def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=''',''')
    in_order(node.right)
def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=''',''')
def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=''',''')
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)
def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=''',''')
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=''',''')
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=''',''')
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=''',''')
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str = "" , _SCREAMING_SNAKE_CASE : Optional[Any]=5_0 , _SCREAMING_SNAKE_CASE : Optional[int]="*" )->str:
if not s:
return "\n" + width * char
_lowerCAmelCase , _lowerCAmelCase = divmod(width - len(_SCREAMING_SNAKE_CASE ) - 2 , 2 )
return f'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
    node = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 5_0 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 664
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    feature_extractor_class = '''ClapFeatureExtractor'''
    tokenizer_class = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop('''sampling_rate''', None)
        if text is None and audios is None:
            raise ValueError('''You have to specify either text or audios. Both cannot be none.''')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs)
        if text is not None and audios is not None:
            encoding['''input_features'''] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
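# A minimal usage sketch for the processor above; the checkpoint name and the 48 kHz
# sampling rate are illustrative assumptions, not values taken from this file.
if __name__ == "__main__":
    import numpy as np

    processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
    waveform = np.zeros(48_000, dtype=np.float32)  # one second of silence
    inputs = processor(text=["a dog barking"], audios=[waveform], sampling_rate=48_000, return_tensors="pt")
    print(sorted(inputs.keys()))  # tokenizer fields plus "input_features"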
| 664
| 1
|
import math
def decimal_to_octal(num: int) -> str:
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"""0o{int(octal)}"""
def main() -> None:
print("""\n2 in octal is:""" )
print(decimal_to_octal(2 ) ) # = 2
print("""\n8 in octal is:""" )
print(decimal_to_octal(8 ) ) # = 10
print("""\n65 in octal is:""" )
print(decimal_to_octal(65 ) ) # = 101
print("""\n216 in octal is:""" )
print(decimal_to_octal(216 ) ) # = 330
print("""\n512 in octal is:""" )
print(decimal_to_octal(512 ) ) # = 1000
print("""\n""" )
if __name__ == "__main__":
main()
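# A quick cross-check (an illustrative addition): Python's built-in oct() should agree
# with the hand-rolled conversion above for any positive integer.
def _check_against_builtin() -> None:
    for n in (2, 8, 65, 216, 512):
        assert decimal_to_octal(n) == oct(n), (n, decimal_to_octal(n), oct(n))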
| 17
|
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def test_mockfs(mock_fsspec):
    '''simple docstring'''
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    '''simple docstring'''
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    '''simple docstring'''
    mock_bucket = "mock-s3-bucket"
    dataset_path = F'''s3://{mock_bucket}'''
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    new_dataset_path = "./local/path"
    dataset_path = extract_path_from_uri(new_dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    '''simple docstring'''
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class" , A)
def A__ ( A : List[Any] , A : Any , A : str , A : Union[str, Any] , A : List[str] , A : List[Any] , A : Optional[Any]):
'''simple docstring'''
UpperCamelCase : List[str] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
UpperCamelCase : Any = input_paths[compression_fs_class.protocol]
if input_path is None:
UpperCamelCase : Any = F'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(A)
UpperCamelCase : List[str] = fsspec.filesystem(compression_fs_class.protocol , fo=A)
assert isinstance(A , A)
UpperCamelCase : List[Any] = os.path.basename(A)
UpperCamelCase : Union[str, Any] = expected_filename[: expected_filename.rindex(".")]
assert fs.glob("*") == [expected_filename]
with fs.open(A , "r" , encoding="utf-8") as f, open(A , encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol" , ["zip", "gzip"])
def A__ ( A : Optional[int] , A : str , A : Optional[Any]):
'''simple docstring'''
UpperCamelCase : Any = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
UpperCamelCase : str = compressed_file_paths[protocol]
UpperCamelCase : Optional[int] = "dataset.jsonl"
UpperCamelCase : Tuple = F'''{protocol}://{member_file_path}::{compressed_file_path}'''
UpperCamelCase , *UpperCamelCase : Dict = fsspec.get_fs_token_paths(A)
assert fs.isfile(A)
assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    '''simple docstring'''
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    '''simple docstring'''
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
assert (
str(warning_info[0].message)
== F'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
)
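# An illustrative sketch (not from the original tests) of the fsspec URL chaining
# exercised above: a member file inside an archive is addressed as
# "<protocol>://<member>::<archive path>". The archive contents here are made up.
def _chaining_demo() -> None:
    import tempfile
    import zipfile

    archive_path = os.path.join(tempfile.mkdtemp(), "archive.zip")
    with zipfile.ZipFile(archive_path, "w") as zf:
        zf.writestr("dataset.jsonl", '{"a": 1}\n')
    fs, *_ = fsspec.get_fs_token_paths(F'''zip://dataset.jsonl::{archive_path}''')
    assert fs.isfile("dataset.jsonl")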
| 173
| 0
|
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError(RuntimeError):
    """simple docstring"""

    pass
def gen(shards: List[str]):
    '''simple docstring'''
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}
def main():
    '''simple docstring'''
    rank = int(os.environ['RANK'])
    world_size = int(os.environ['WORLD_SIZE'])

    parser = ArgumentParser()
    parser.add_argument('--streaming', type=bool)
    parser.add_argument('--local_rank', type=int)
    parser.add_argument('--num_workers', type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {'shards': [F'''shard_{shard_idx}''' for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(F'''local_size {local_size} != expected_local_size {expected_local_size}''')
if __name__ == "__main__":
main()
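# Worked example of the size arithmetic above (an illustrative addition): with
# NUM_SHARDS = 4 and NUM_ITEMS_PER_SHARD = 3 there are 12 examples in total. For
# world_size = 5, ranks 0-1 receive 3 examples each (12 // 5 + 1, since rank < 12 % 5)
# and ranks 2-4 receive 2 each.
def _expected_sizes(full_size: int, world_size: int) -> list:
    return [full_size // world_size + int(rank < full_size % world_size) for rank in range(world_size)]


assert _expected_sizes(12, 5) == [3, 3, 2, 2, 2]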
| 648
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    """simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.0_2, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
    def create_and_check_megatron_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_megatron_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_megatron_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_megatron_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_megatron_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_megatron_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_megatron_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True

    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    '''simple docstring'''
    return torch.tensor(
        tok_lst, dtype=torch.long, device=torch_device, )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    @slow
    @unittest.skip('Model is not available.')
    def test_inference_no_head(self):
        directory = 'nvidia/megatron-bert-uncased-345m'
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ['MYDIR'], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 10_24))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = 'ii={} jj={} a={} b={}'.format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 648
| 1
|
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
    set_a = {'''a''', '''b''', '''c''', '''d''', '''e'''}
    set_b = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
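    # Worked check (an illustrative addition): the sets above share {c, d, e} (3 elements)
    # out of a union of 8, so the printed similarity is 3 / 8 = 0.375.
    assert jaccard_similarity(set_a, set_b) == 0.375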
| 107
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def split_text(text: str, n=1_00, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents['''title'''], documents['''text''']):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else '''''')
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents['''title'''], documents['''text'''], truncation=True, padding='''longest''', return_tensors='''pt''')['''input_ids''']
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def _UpperCAmelCase ( __A : "RagExampleArguments" , __A : "ProcessingArguments" , __A : "IndexHnswArguments" , ):
######################################
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
a_ : List[str] = load_dataset(
'''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
a_ : Dict = dataset.map(__A , batched=__A , num_proc=processing_args.num_proc )
# And compute the embeddings
a_ : str = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__A )
a_ : Tuple = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
a_ : Union[str, Any] = Features(
{'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space
a_ : Union[str, Any] = dataset.map(
partial(__A , ctx_encoder=__A , ctx_tokenizer=__A ) , batched=__A , batch_size=processing_args.batch_size , features=__A , )
# And finally save your dataset
a_ : Tuple = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' )
dataset.save_to_disk(__A )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
a_ : Dict = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('''embeddings''' , custom_index=__A )
# And save the index
a_ : Optional[Any] = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' )
dataset.get_index('''embeddings''' ).save(__A )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"), metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"}, )
    question: Optional[str] = field(
        default=None, metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."}, )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq", metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"}, )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base", metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        }, )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"), metadata={"help": "Path to a directory where the dataset passages and the index will be saved"}, )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None, metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        }, )
    batch_size: int = field(
        default=16, metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        }, )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=7_68, metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."}, )
    m: int = field(
        default=1_28, metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        }, )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
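# A hedged sketch (an illustrative addition, never called by this script) of how the
# saved passages and FAISS index could be queried afterwards; the question-encoder
# checkpoint and the path arguments are assumptions for demonstration.
def query_index_example(passages_path: str, index_path: str, question: str):
    from datasets import load_from_disk
    from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

    dataset = load_from_disk(passages_path)
    dataset.load_faiss_index("embeddings", index_path)
    q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    question_emb = q_encoder(**q_tokenizer(question, return_tensors="pt")).pooler_output
    scores, retrieved = dataset.get_nearest_examples("embeddings", question_emb.detach().numpy()[0], k=5)
    return scores, retrieved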
| 466
| 0
|
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    """simple docstring"""
    return EnvironmentCommand()


def download_command_factory(args):
    """simple docstring"""
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser) -> None:
        '''simple docstring'''
        download_parser = parser.add_parser("""env""")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            """--accelerate-config_file""", default=None, help="""The accelerate config file to use for the default values in the launching script.""", )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        '''simple docstring'''
        self._accelerate_config_file = accelerate_config_file
    def run(self) -> dict:
        '''simple docstring'''
        safetensors_version = """not installed"""
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("""safetensors""") is not None:
            import safetensors

            safetensors_version = f"""{safetensors.__version__} but is ignored because of PyTorch version too old."""

        accelerate_version = """not installed"""
        accelerate_config = accelerate_config_str = """not found"""
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                """\n""".join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"""\t{accelerate_config}"""
            )

        pt_version = """not installed"""
        pt_cuda_available = """NA"""
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = """not installed"""
        tf_cuda_available = """NA"""
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("""GPU"""))

        flax_version = """not installed"""
        jax_version = """not installed"""
        jaxlib_version = """not installed"""
        jax_backend = """NA"""
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            """`transformers` version""": version,
            """Platform""": platform.platform(),
            """Python version""": platform.python_version(),
            """Huggingface_hub version""": huggingface_hub.__version__,
            """Safetensors version""": f"""{safetensors_version}""",
            """Accelerate version""": f"""{accelerate_version}""",
            """Accelerate config""": f"""{accelerate_config_str}""",
            """PyTorch version (GPU?)""": f"""{pt_version} ({pt_cuda_available})""",
            """Tensorflow version (GPU?)""": f"""{tf_version} ({tf_cuda_available})""",
            """Flax version (CPU?/GPU?/TPU?)""": f"""{flax_version} ({jax_backend})""",
            """Jax version""": f"""{jax_version}""",
            """JaxLib version""": f"""{jaxlib_version}""",
            """Using GPU in script?""": """<fill in>""",
            """Using distributed or parallel set-up in script?""": """<fill in>""",
        }

        print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d: dict) -> str:
        '''simple docstring'''
        return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()]) + "\n"
| 19
|
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class a_ ( nn.Module ):
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, ) -> None:
        '''simple docstring'''
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, )
                for _ in range(2)
            ])

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 2_57]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True, ):
        '''simple docstring'''
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=output_states)
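# A tiny illustrative check of the mixing rule used in `forward` above: the two residuals
# are linearly interpolated with `mix_ratio` before the skip connection is added back.
# The shapes and values here are assumptions for demonstration only.
if __name__ == "__main__":
    import torch

    mix_ratio = 0.5
    input_states = torch.zeros(1, 4)
    encoded = [torch.full((1, 4), 2.0), torch.full((1, 4), 4.0)]  # residuals from the two transformers
    output = encoded[0] * mix_ratio + encoded[1] * (1 - mix_ratio) + input_states
    assert torch.allclose(output, torch.full((1, 4), 3.0))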
| 19
| 1
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class A_ ( unittest.TestCase ):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (3_2, 3_2)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4), layers_per_block=2, sample_size=3_2, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=3_2, )
        return model
@property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[3_2, 6_4], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=3_2, intermediate_size=3_7, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_0_0_0, )
        return CLIPTextModel(config)
@property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger'

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type='np')
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type='np', return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
    def test_pndm(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type='np')
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type='np', return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-lms-pipe', safety_checker=None)
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe('example prompt', num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe('example prompt', num_inference_steps=2).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger'
        image = sd_pipe([prompt], num_inference_steps=2, output_type='np').images

        assert image.shape == (1, 6_4, 6_4, 3)
@nightly
@require_torch_gpu
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : int):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : int):
__lowerCamelCase : Tuple = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' ,safety_checker=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
__lowerCamelCase : Any = sd_pipe.to(SCREAMING_SNAKE_CASE__)
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
__lowerCamelCase : Union[str, Any] = 4_0_0_3_6_6_0_3_4_6
__lowerCamelCase : List[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
__lowerCamelCase : Optional[int] = torch.manual_seed(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = sd_pipe(
[prompt] ,generator=SCREAMING_SNAKE_CASE__ ,guidance_scale=SCREAMING_SNAKE_CASE__ ,num_inference_steps=5_0 ,output_type='np' ,width=5_1_2 ,height=5_1_2 ,sld_guidance_scale=0 ,)
__lowerCamelCase : str = output.images
__lowerCamelCase : Dict = image[0, -3:, -3:, -1]
__lowerCamelCase : str = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
# with safety guidance (strong configuration)
__lowerCamelCase : Tuple = torch.manual_seed(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = sd_pipe(
[prompt] ,generator=SCREAMING_SNAKE_CASE__ ,guidance_scale=SCREAMING_SNAKE_CASE__ ,num_inference_steps=5_0 ,output_type='np' ,width=5_1_2 ,height=5_1_2 ,sld_guidance_scale=2_0_0_0 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
__lowerCamelCase : Union[str, Any] = output.images
__lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
__lowerCamelCase : List[str] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def lowerCAmelCase ( self : Dict):
__lowerCamelCase : Union[str, Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' ,safety_checker=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
__lowerCamelCase : Optional[Any] = sd_pipe.to(SCREAMING_SNAKE_CASE__)
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = 'padme amidala taking a bath artwork, safe for work, no nudity'
__lowerCamelCase : Optional[int] = 2_7_3_4_9_7_1_7_5_5
__lowerCamelCase : List[str] = 7
__lowerCamelCase : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = sd_pipe(
[prompt] ,generator=SCREAMING_SNAKE_CASE__ ,guidance_scale=SCREAMING_SNAKE_CASE__ ,num_inference_steps=5_0 ,output_type='np' ,width=5_1_2 ,height=5_1_2 ,sld_guidance_scale=0 ,)
__lowerCamelCase : Union[str, Any] = output.images
__lowerCamelCase : List[str] = image[0, -3:, -3:, -1]
__lowerCamelCase : Any = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
__lowerCamelCase : int = torch.manual_seed(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = sd_pipe(
[prompt] ,generator=SCREAMING_SNAKE_CASE__ ,guidance_scale=SCREAMING_SNAKE_CASE__ ,num_inference_steps=5_0 ,output_type='np' ,width=5_1_2 ,height=5_1_2 ,sld_guidance_scale=2_0_0_0 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
__lowerCamelCase : Any = output.images
__lowerCamelCase : int = image[0, -3:, -3:, -1]
__lowerCamelCase : List[Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : List[str] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
__lowerCamelCase : List[str] = sd_pipe.to(SCREAMING_SNAKE_CASE__)
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
__lowerCamelCase : Any = 1_0_4_4_3_5_5_2_3_4
__lowerCamelCase : Optional[Any] = 1_2
__lowerCamelCase : List[str] = torch.manual_seed(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = sd_pipe(
[prompt] ,generator=SCREAMING_SNAKE_CASE__ ,guidance_scale=SCREAMING_SNAKE_CASE__ ,num_inference_steps=5_0 ,output_type='np' ,width=5_1_2 ,height=5_1_2 ,sld_guidance_scale=0 ,)
__lowerCamelCase : int = output.images
__lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
__lowerCamelCase : str = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-7
__lowerCamelCase : int = torch.manual_seed(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = sd_pipe(
[prompt] ,generator=SCREAMING_SNAKE_CASE__ ,guidance_scale=SCREAMING_SNAKE_CASE__ ,num_inference_steps=5_0 ,output_type='np' ,width=5_1_2 ,height=5_1_2 ,sld_guidance_scale=2_0_0_0 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
__lowerCamelCase : Optional[Any] = output.images
__lowerCamelCase : List[str] = image[0, -3:, -3:, -1]
__lowerCamelCase : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
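# Editor's note (added summary, not part of the original tests): in each nightly case
# above, the first call uses sld_guidance_scale=0, which turns safe latent diffusion off,
# and the second repeats the same seed with the strong configuration
# (sld_guidance_scale=2000 plus warmup, threshold, and momentum settings); the paired
# slice assertions pin down how safety guidance changes the output for that seed.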
| 652
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(' ') else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding='max_length' if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, )
def trim_batch(input_ids, pad_token_id, attention_mask=None, ):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
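# Editor's sketch (hypothetical demo, not part of the original file): trim_batch drops
# token columns that are padding in every row of the batch.
def _demo_trim_batch():
    pad_token_id = 0
    input_ids = torch.tensor([[5, 6, 7, 0], [8, 9, 0, 0]])
    trimmed = trim_batch(input_ids, pad_token_id)
    assert trimmed.shape == (2, 3)  # the all-pad last column was removed
    return trimmed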
class Seq2SeqDataset(Dataset):
    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="", ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + '.source')
        self.tgt_file = Path(data_dir).joinpath(type_path + '.target')
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
    def __len__(self):
        return len(self.src_lens)
    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip('\n')
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip('\n')
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, TaTokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, 'right')
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, 'right')
        source_ids = source_inputs['input_ids'].squeeze()
        target_ids = target_inputs['input_ids'].squeeze()
        src_mask = source_inputs['attention_mask'].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]
    def collate_fn(self, batch):
        input_ids = torch.stack([x['input_ids'] for x in batch])
        masks = torch.stack([x['attention_mask'] for x in batch])
        target_ids = torch.stack([x['decoder_input_ids'] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            'input_ids': source_ids,
            'attention_mask': source_mask,
            'decoder_input_ids': y,
        }
        return batch
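# Editor's sketch (hypothetical usage, not part of the original file): the dataset pairs
# `<type_path>.source` / `<type_path>.target` line files and is meant to be consumed with
# its own collate_fn, e.g.:
#
#   train_ds = Seq2SeqDataset(tokenizer, data_dir, max_source_length=128, max_target_length=32)
#   loader = torch.utils.data.DataLoader(train_ds, batch_size=8, collate_fn=train_ds.collate_fn)
#
# where `tokenizer` and `data_dir` are placeholders for a real tokenizer and data folder.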
logger = getLogger(__name__)
def flatten_list(summary_ids):
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path) -> None:
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, 'git_log.json'))
def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, 'w') as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    with open(path) as f:
        return json.load(f)
def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
        'hostname': str(socket.gethostname()),
    }
    return repo_infos
def lmap(f, x):
    return list(map(f, x))
def pickle_save(obj, path):
    with open(path, 'wb') as f:
        return pickle.dump(obj, f)
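# Editor's sketch (hypothetical demo, not part of the original file): save_json/load_json
# round-trip plain dicts, which is how the git metadata above is persisted.
def _demo_json_roundtrip(tmp_path='/tmp/_demo_git_log.json'):
    payload = {'repo_id': 'example', 'repo_sha': 'abc123'}
    save_json(payload, tmp_path)
    assert load_json(tmp_path) == payload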
def normalize_answer(s) -> str:
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        return re.sub(r'\b(a|an|the)\b', ' ', text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
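# Editor's note (worked example, not part of the original file): with prediction
# "a cat sat" and reference "the cat sat down", normalization strips the articles,
# leaving 2 shared tokens out of 2 predicted and 3 reference tokens, so precision
# is 1.0, recall is 2/3, and F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.
def _demo_f1_score():
    assert abs(f1_score('a cat sat', 'the cat sat down') - 0.8) < 1e-9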
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns, reference_lns):
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith('rag')
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['dropout'] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info('config doesn\'t have a `{}` attribute'.format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 652
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :List[Any] = 'nllb-moe'
a :Optional[int] = ['past_key_values']
a :Dict = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : List[Any]=1_2_8_1_1_2 , SCREAMING_SNAKE_CASE_ : List[str]=1_0_2_4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE_ : int=4_0_9_6 , SCREAMING_SNAKE_CASE_ : int=1_6 , SCREAMING_SNAKE_CASE_ : List[Any]=1_2 , SCREAMING_SNAKE_CASE_ : List[Any]=4_0_9_6 , SCREAMING_SNAKE_CASE_ : List[str]=1_6 , SCREAMING_SNAKE_CASE_ : Dict=0.05 , SCREAMING_SNAKE_CASE_ : Optional[int]=0.05 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Optional[int]="relu" , SCREAMING_SNAKE_CASE_ : Dict=1_0_2_4 , SCREAMING_SNAKE_CASE_ : int=0.1 , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : str=0.0 , SCREAMING_SNAKE_CASE_ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE_ : Optional[Any]=2 , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : Any=False , SCREAMING_SNAKE_CASE_ : List[Any]="float32" , SCREAMING_SNAKE_CASE_ : Optional[int]=False , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_2_8 , SCREAMING_SNAKE_CASE_ : int=6_4 , SCREAMING_SNAKE_CASE_ : List[str]=4 , SCREAMING_SNAKE_CASE_ : Optional[int]=4 , SCREAMING_SNAKE_CASE_ : Dict=0.0_01 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.0_01 , SCREAMING_SNAKE_CASE_ : List[Any]="all" , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , SCREAMING_SNAKE_CASE_ : Any=False , SCREAMING_SNAKE_CASE_ : List[Any]=1.0 , SCREAMING_SNAKE_CASE_ : Any=0.2 , SCREAMING_SNAKE_CASE_ : str=1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE_ : Dict=2 , SCREAMING_SNAKE_CASE_ : str=False , **SCREAMING_SNAKE_CASE_ : List[str] , ) -> Union[str, Any]:
lowercase_ = vocab_size
lowercase_ = max_position_embeddings
lowercase_ = d_model
lowercase_ = encoder_ffn_dim
lowercase_ = encoder_layers
lowercase_ = encoder_attention_heads
lowercase_ = decoder_ffn_dim
lowercase_ = decoder_layers
lowercase_ = decoder_attention_heads
lowercase_ = dropout
lowercase_ = attention_dropout
lowercase_ = activation_dropout
lowercase_ = activation_function
lowercase_ = init_std
lowercase_ = encoder_layerdrop
lowercase_ = decoder_layerdrop
lowercase_ = use_cache
lowercase_ = encoder_layers
lowercase_ = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase_ = router_z_loss_coef
lowercase_ = router_aux_loss_coef
lowercase_ = decoder_sparse_step
lowercase_ = encoder_sparse_step
lowercase_ = num_experts
lowercase_ = expert_capacity
lowercase_ = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
lowercase_ = router_dtype
lowercase_ = router_ignore_padding_tokens
lowercase_ = batch_prioritized_routing
lowercase_ = second_expert_policy
lowercase_ = normalize_router_prob_before_dropping
lowercase_ = moe_eval_capacity_token_fraction
lowercase_ = moe_token_dropout
lowercase_ = output_router_logits
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
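# Editor's sketch (hypothetical usage, not part of the original file): assuming this class
# is exposed under the public name `NllbMoeConfig`, a small debug variant can be built by
# overriding a few of the defaults above, e.g.:
#
#   config = NllbMoeConfig(d_model=256, encoder_layers=2, decoder_layers=2, num_experts=4)
#
# An unsupported `router_dtype` raises the ValueError shown above.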
| 409
|
def topological_sort(graph: dict) -> None:
    '''Kahn's algorithm: print a topological ordering of ``graph`` or report a cycle.'''
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print('''Cycle exists''')
    else:
        print(topo)
# Adjacency List of Graph
__a = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
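# Editor's note (worked trace, not part of the original file): for the adjacency list
# above the indegrees start as [0, 1, 1, 2, 1, 1], so vertex 0 is enqueued first and
# Kahn's algorithm pops 0, 1, 2, 3, 4, 5 in order; all 6 vertices are processed, so
# [0, 1, 2, 3, 4, 5] is printed rather than 'Cycle exists'.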
| 409
| 1
|
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
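# Editor's sketch (hypothetical demo, not part of the original file):
def _demo_series():
    assert is_arithmetic_series([2, 4, 6])        # constant difference of 2
    assert not is_arithmetic_series([2, 4, 7])    # difference changes from 2 to 3
    assert arithmetic_mean([2, 4, 6]) == 4.0      # (2 + 4 + 6) / 3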
if __name__ == "__main__":
import doctest
doctest.testmod()
| 375
|
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
SCREAMING_SNAKE_CASE_ : str = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def SCREAMING_SNAKE_CASE ( snake_case ) -> Tuple:
__lowercase = {}
state_dict.pop('pixel_mean' , snake_case )
state_dict.pop('pixel_std' , snake_case )
__lowercase = r'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__lowercase = key.replace(snake_case , snake_case )
if re.match(snake_case , snake_case ):
__lowercase = int(re.match(snake_case , snake_case ).group(2 ) )
if layer_nb == 0:
__lowercase = key.replace('layers.0' , 'proj_in' )
elif layer_nb == 1:
__lowercase = key.replace('layers.1' , 'layers.0' )
elif layer_nb == 2:
__lowercase = key.replace('layers.2' , 'proj_out' )
__lowercase = value
__lowercase = model_state_dict[
'prompt_encoder.shared_embedding.positional_embedding'
]
return model_state_dict
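# Editor's note (worked example, not part of the original file): under the regex above, a
# checkpoint key such as 'mask_decoder.output_hypernetworks_mlps.3.layers.2.weight' has
# layer_nb == 2, so its 'layers.2' segment is rewritten to 'proj_out'; layer 0 maps to
# 'proj_in' and layer 1 to 'layers.0', mirroring the iou_prediction_head renames in
# KEYS_TO_MODIFY_MAPPING.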
def SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , snake_case="ybelkada/segment-anything" ) -> int:
__lowercase = hf_hub_download(snake_case , F"checkpoints/{model_name}.pth" )
if "sam_vit_b" in model_name:
__lowercase = SamConfig()
elif "sam_vit_l" in model_name:
__lowercase = SamVisionConfig(
hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
__lowercase = SamConfig(
vision_config=snake_case , )
elif "sam_vit_h" in model_name:
__lowercase = SamVisionConfig(
hidden_size=1_280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
__lowercase = SamConfig(
vision_config=snake_case , )
__lowercase = torch.load(snake_case , map_location='cpu' )
__lowercase = replace_keys(snake_case )
__lowercase = SamImageProcessor()
__lowercase = SamProcessor(image_processor=snake_case )
__lowercase = SamModel(snake_case )
hf_model.load_state_dict(snake_case )
__lowercase = hf_model.to('cuda' )
__lowercase = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
__lowercase = Image.open(requests.get(snake_case , stream=snake_case ).raw ).convert('RGB' )
__lowercase = [[[400, 650]]]
__lowercase = [[1]]
__lowercase = processor(images=np.array(snake_case ) , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
__lowercase = hf_model(**snake_case )
__lowercase = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_8902_5115_9668
__lowercase = processor(
images=np.array(snake_case ) , input_points=snake_case , input_labels=snake_case , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
__lowercase = hf_model(**snake_case )
__lowercase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712_6030_9219_3604
__lowercase = ((75, 275, 1_725, 850),)
__lowercase = processor(images=np.array(snake_case ) , input_boxes=snake_case , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
__lowercase = hf_model(**snake_case )
__lowercase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686_0156_0592_6514
# Test with 2 points and 1 image.
__lowercase = [[[400, 650], [800, 650]]]
__lowercase = [[1, 1]]
__lowercase = processor(
images=np.array(snake_case ) , input_points=snake_case , input_labels=snake_case , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
__lowercase = hf_model(**snake_case )
__lowercase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Optional[Any] = argparse.ArgumentParser()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ['''sam_vit_b_01ec64''', '''sam_vit_h_4b8939''', '''sam_vit_l_0b3195''']
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
SCREAMING_SNAKE_CASE_ : List[Any] = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
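# Editor's note (hypothetical invocation, not part of the original file; the script file
# name is assumed):
#
#   python convert_sam_to_hf.py --model_name sam_vit_b_01ec64 --pytorch_dump_folder_path ./sam-vit-b
#
# runs the conversion and the point/box prompt sanity checks above on a CUDA device.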
| 375
| 1
|
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] ):
'''simple docstring'''
if isinstance(_lowerCAmelCase , torch.Tensor ):
return image
elif isinstance(_lowerCAmelCase , PIL.Image.Image ):
lowercase__ : Union[str, Any] = [image]
if isinstance(image[0] , PIL.Image.Image ):
lowercase__ : int = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
lowercase__ : str = np.concatenate(_lowerCAmelCase , axis=0 )
lowercase__ : Union[str, Any] = np.array(_lowerCAmelCase ).astype(np.floataa ) / 255.0
lowercase__ : Union[str, Any] = image.transpose(0 , 3 , 1 , 2 )
lowercase__ : Optional[int] = 2.0 * image - 1.0
lowercase__ : Union[str, Any] = torch.from_numpy(_lowerCAmelCase )
elif isinstance(image[0] , torch.Tensor ):
lowercase__ : Optional[int] = torch.cat(_lowerCAmelCase , dim=0 )
return image
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple=0.9_9_9_5 ):
'''simple docstring'''
if not isinstance(_lowerCAmelCase , np.ndarray ):
lowercase__ : Optional[int] = True
lowercase__ : str = va.device
lowercase__ : Tuple = va.cpu().numpy()
lowercase__ : Any = va.cpu().numpy()
lowercase__ : Dict = np.sum(va * va / (np.linalg.norm(_lowerCAmelCase ) * np.linalg.norm(_lowerCAmelCase )) )
if np.abs(_lowerCAmelCase ) > DOT_THRESHOLD:
lowercase__ : Optional[int] = (1 - t) * va + t * va
else:
lowercase__ : Dict = np.arccos(_lowerCAmelCase )
lowercase__ : Optional[Any] = np.sin(_lowerCAmelCase )
lowercase__ : Union[str, Any] = theta_a * t
lowercase__ : List[Any] = np.sin(_lowerCAmelCase )
lowercase__ : str = np.sin(theta_a - theta_t ) / sin_theta_a
lowercase__ : Any = sin_theta_t / sin_theta_a
lowercase__ : Optional[Any] = sa * va + sa * va
if inputs_are_torch:
lowercase__ : List[str] = torch.from_numpy(_lowerCAmelCase ).to(_lowerCAmelCase )
return va
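# Editor's sketch (self-contained demo, not part of the original file): spherical
# interpolation keeps the interpolant on the arc between its endpoints, so for
# orthogonal unit vectors the midpoint is again a unit vector.
def _demo_slerp_midpoint():
    va, vb = np.array([1.0, 0.0]), np.array([0.0, 1.0])
    theta = np.arccos(np.dot(va, vb))  # pi / 2 for orthogonal unit vectors
    t = 0.5
    out = (np.sin((1 - t) * theta) * va + np.sin(t * theta) * vb) / np.sin(theta)
    assert np.allclose(np.linalg.norm(out), 1.0)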
def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple ):
'''simple docstring'''
lowercase__ : Any = F.normalize(_lowerCAmelCase , dim=-1 )
lowercase__ : Dict = F.normalize(_lowerCAmelCase , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
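# Editor's note (derivation, not part of the original file): for unit vectors x and y,
# ||x - y|| / 2 = sin(theta / 2) where theta is the angle between them, so
# arcsin(||x - y|| / 2) recovers theta / 2 and the expression above equals theta^2 / 2,
# i.e. half the squared geodesic distance on the unit sphere.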
def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
for param in model.parameters():
lowercase__ : Optional[int] = value
class UpperCAmelCase_ ( _a):
def __init__( self , a , a , a , a , a , a , a , a=None , a=None , a=None , ) -> Any:
super().__init__()
self.register_modules(
vae=a , text_encoder=a , clip_model=a , tokenizer=a , unet=a , scheduler=a , feature_extractor=a , coca_model=a , coca_tokenizer=a , coca_transform=a , )
lowercase__ : Tuple = (
feature_extractor.size
if isinstance(feature_extractor.size , a )
else feature_extractor.size['shortest_edge']
)
lowercase__ : Optional[int] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , a )
set_requires_grad(self.clip_model , a )
def _UpperCAmelCase ( self , a = "auto" ) -> List[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase__ : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a )
def _UpperCAmelCase ( self ) -> Dict:
self.enable_attention_slicing(a )
def _UpperCAmelCase ( self ) -> Any:
set_requires_grad(self.vae , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
set_requires_grad(self.vae , a )
def _UpperCAmelCase ( self ) -> int:
set_requires_grad(self.unet , a )
def _UpperCAmelCase ( self ) -> Any:
set_requires_grad(self.unet , a )
def _UpperCAmelCase ( self , a , a , a ) -> int:
# get the original timestep using init_timestep
lowercase__ : int = min(int(num_inference_steps * strength ) , a )
lowercase__ : Optional[Any] = max(num_inference_steps - init_timestep , 0 )
lowercase__ : List[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
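# Editor's note (worked example, not part of the original file): with
# num_inference_steps=50 and strength=0.6, init_timestep = min(30, 50) = 30 and
# t_start = 50 - 30 = 20, so only the last 30 scheduler timesteps are run -- a higher
# strength adds more noise and re-denoises more of the trajectory.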
def _UpperCAmelCase ( self , a , a , a , a , a , a=None ) -> Tuple:
if not isinstance(a , torch.Tensor ):
raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(a )}""" )
lowercase__ : str = image.to(device=a , dtype=a )
if isinstance(a , a ):
lowercase__ : Optional[Any] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a )
]
lowercase__ : Tuple = torch.cat(a , dim=0 )
else:
lowercase__ : Any = self.vae.encode(a ).latent_dist.sample(a )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowercase__ : int = 0.18_215 * init_latents
lowercase__ : int = init_latents.repeat_interleave(a , dim=0 )
lowercase__ : Dict = randn_tensor(init_latents.shape , generator=a , device=a , dtype=a )
# get latents
lowercase__ : int = self.scheduler.add_noise(a , a , a )
lowercase__ : List[Any] = init_latents
return latents
def _UpperCAmelCase ( self , a ) -> List[str]:
lowercase__ : List[Any] = self.coca_transform(a ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
lowercase__ : Any = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
lowercase__ : Optional[int] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def _UpperCAmelCase ( self , a , a ) -> List[Any]:
lowercase__ : int = self.feature_extractor.preprocess(a )
lowercase__ : Optional[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
lowercase__ : List[str] = self.clip_model.get_image_features(a )
lowercase__ : Tuple = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=a )
lowercase__ : Optional[Any] = image_embeddings_clip.repeat_interleave(a , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def _UpperCAmelCase ( self , a , a , a , a , a , a , a , ) -> str:
lowercase__ : Tuple = latents.detach().requires_grad_()
lowercase__ : Dict = self.scheduler.scale_model_input(a , a )
# predict the noise residual
lowercase__ : int = self.unet(a , a , encoder_hidden_states=a ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
lowercase__ : int = self.scheduler.alphas_cumprod[timestep]
lowercase__ : Tuple = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowercase__ : Any = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
lowercase__ : List[str] = torch.sqrt(a )
lowercase__ : Any = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , a ):
lowercase__ : Dict = self.scheduler.sigmas[index]
lowercase__ : List[str] = latents - sigma * noise_pred
else:
raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowercase__ : str = 1 / 0.18_215 * sample
lowercase__ : int = self.vae.decode(a ).sample
lowercase__ : int = (image / 2 + 0.5).clamp(0 , 1 )
lowercase__ : Dict = transforms.Resize(self.feature_extractor_size )(a )
lowercase__ : Optional[int] = self.normalize(a ).to(latents.dtype )
lowercase__ : str = self.clip_model.get_image_features(a )
lowercase__ : Tuple = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=a )
lowercase__ : Dict = spherical_dist_loss(a , a ).mean() * clip_guidance_scale
lowercase__ : Tuple = -torch.autograd.grad(a , a )[0]
if isinstance(self.scheduler , a ):
lowercase__ : int = latents.detach() + grads * (sigma**2)
lowercase__ : Optional[Any] = noise_pred_original
else:
lowercase__ : List[Any] = noise_pred_original - torch.sqrt(a ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , a , a , a = None , a = None , a = 5_1_2 , a = 5_1_2 , a = 0.6 , a = 5_0 , a = 7.5 , a = 1 , a = 0.0 , a = 1_0_0 , a = None , a = "pil" , a = True , a = 0.8 , a = 0.1 , a = 0.1 , ) -> Dict:
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(a )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(a , torch.Generator ) and batch_size > 1:
lowercase__ : Optional[int] = [generator] + [None] * (batch_size - 1)
lowercase__ : Tuple = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
lowercase__ : Optional[Any] = [x[0] for x in coca_is_none if x[1]]
lowercase__ : Dict = ', '.join(a )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(a ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
lowercase__ : int = self.get_image_description(a )
if style_prompt is None:
if len(a ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
lowercase__ : List[str] = self.get_image_description(a )
# get prompt text embeddings for content and style
lowercase__ : Any = self.tokenizer(
a , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=a , return_tensors='pt' , )
lowercase__ : Optional[Any] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
lowercase__ : Tuple = self.tokenizer(
a , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=a , return_tensors='pt' , )
lowercase__ : Union[str, Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
lowercase__ : Union[str, Any] = slerp(a , a , a )
# duplicate text embeddings for each generation per prompt
lowercase__ : Any = text_embeddings.repeat_interleave(a , dim=0 )
# set timesteps
lowercase__ : List[str] = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
lowercase__ : Optional[Any] = {}
if accepts_offset:
lowercase__ : Dict = 1
self.scheduler.set_timesteps(a , **a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
lowercase__ : Optional[int] = self.get_timesteps(a , a , self.device )
lowercase__ : List[Any] = timesteps[:1].repeat(a )
# Preprocess image
lowercase__ : Tuple = preprocess(a , a , a )
lowercase__ : int = self.prepare_latents(
a , a , a , text_embeddings.dtype , self.device , a )
lowercase__ : List[Any] = preprocess(a , a , a )
lowercase__ : Union[str, Any] = self.prepare_latents(
a , a , a , text_embeddings.dtype , self.device , a )
lowercase__ : str = slerp(a , a , a )
if clip_guidance_scale > 0:
lowercase__ : List[str] = self.get_clip_image_embeddings(a , a )
lowercase__ : Any = self.get_clip_image_embeddings(a , a )
lowercase__ : Dict = slerp(
a , a , a )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__ : Tuple = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ : Optional[int] = content_text_input.input_ids.shape[-1]
lowercase__ : Optional[Any] = self.tokenizer([''] , padding='max_length' , max_length=a , return_tensors='pt' )
lowercase__ : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
lowercase__ : Optional[int] = uncond_embeddings.repeat_interleave(a , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ : Tuple = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase__ : Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
lowercase__ : str = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
lowercase__ : Tuple = torch.randn(a , generator=a , device='cpu' , dtype=a ).to(
self.device )
else:
lowercase__ : Any = torch.randn(a , generator=a , device=self.device , dtype=a )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowercase__ : Tuple = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ : Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ : Any = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ : Optional[int] = {}
if accepts_eta:
lowercase__ : str = eta
# check if the scheduler accepts generator
lowercase__ : List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
lowercase__ : str = generator
with self.progress_bar(total=a ):
for i, t in enumerate(a ):
# expand the latents if we are doing classifier free guidance
lowercase__ : List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ : Dict = self.scheduler.scale_model_input(a , a )
# predict the noise residual
lowercase__ : List[Any] = self.unet(a , a , encoder_hidden_states=a ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
lowercase__ : List[str] = noise_pred.chunk(2 )
lowercase__ : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
lowercase__ : int = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
lowercase__ : int = self.cond_fn(
a , a , a , a , a , a , a , )
# compute the previous noisy sample x_t -> x_t-1
lowercase__ : Optional[Any] = self.scheduler.step(a , a , a , **a ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowercase__ : int = 1 / 0.18_215 * latents
lowercase__ : List[Any] = self.vae.decode(a ).sample
lowercase__ : Dict = (image / 2 + 0.5).clamp(0 , 1 )
lowercase__ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase__ : Tuple = self.numpy_to_pil(a )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=a , nsfw_content_detected=a )
| 702
|
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCamelCase : Dict = logging.get_logger(__name__)
_UpperCamelCase : List[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCamelCase : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
'''simple docstring'''
for attribute in key.split('.' ):
lowercase__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
lowercase__ : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
lowercase__ : Optional[int] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowercase__ : Optional[Any] = value
elif weight_type == "weight_g":
lowercase__ : Dict = value
elif weight_type == "weight_v":
lowercase__ : List[str] = value
elif weight_type == "bias":
lowercase__ : Optional[Any] = value
else:
lowercase__ : List[str] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Tuple = []
lowercase__ : List[str] = fairseq_model.state_dict()
lowercase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
lowercase__ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ : List[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
lowercase__ : int = True
if "*" in mapped_key:
lowercase__ : Optional[int] = name.split(_lowerCAmelCase )[0].split('.' )[-2]
lowercase__ : List[str] = mapped_key.replace('*' , _lowerCAmelCase )
if "weight_g" in name:
lowercase__ : List[Any] = 'weight_g'
elif "weight_v" in name:
lowercase__ : int = 'weight_v'
elif "bias" in name:
lowercase__ : Dict = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ : Union[str, Any] = 'weight'
else:
lowercase__ : int = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : int = full_name.split('conv_layers.' )[-1]
lowercase__ : int = name.split('.' )
lowercase__ : int = int(items[0] )
lowercase__ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowercase__ : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowercase__ : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
lowercase__ : List[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
lowercase__ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
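# Editor's note (added summary, not part of the original file): conv-layer entries are
# routed by type_id -- type_id == 0 loads the convolution's own weight/bias, while
# type_id == 2 loads norm parameters, which exist for every layer when layer norm is
# used but only for layer 0 under group norm, matching the branch condition above;
# anything else is collected in `unused_weights`.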
@torch.no_grad()
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=True ):
'''simple docstring'''
if config_path is not None:
lowercase__ : Any = UniSpeechSatConfig.from_pretrained(_lowerCAmelCase )
else:
lowercase__ : Any = UniSpeechSatConfig()
lowercase__ : Union[str, Any] = ''
if is_finetuned:
lowercase__ : Optional[Any] = UniSpeechSatForCTC(_lowerCAmelCase )
else:
lowercase__ : List[Any] = UniSpeechSatForPreTraining(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowercase__ : Union[str, Any] = model[0].eval()
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCamelCase : str = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 645
| 0
|
import torch
from torch import nn
class UpperCamelCase ( nn.Module ):
def __init__(self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=1 , __UpperCamelCase=False ) -> Optional[int]:
super().__init__()
UpperCamelCase_ : List[str] = n_token
UpperCamelCase_ : List[Any] = d_embed
UpperCamelCase_ : List[Any] = d_proj
UpperCamelCase_ : Optional[Any] = cutoffs + [n_token]
UpperCamelCase_ : Tuple = [0] + self.cutoffs
UpperCamelCase_ : Union[str, Any] = div_val
UpperCamelCase_ : str = self.cutoffs[0]
UpperCamelCase_ : Tuple = len(self.cutoffs ) - 1
UpperCamelCase_ : int = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
UpperCamelCase_ : Union[str, Any] = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
UpperCamelCase_ : Dict = nn.Parameter(torch.zeros(self.n_clusters ) )
UpperCamelCase_ : Tuple = nn.ModuleList()
UpperCamelCase_ : List[Any] = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(__UpperCamelCase , __UpperCamelCase ) ) )
else:
self.out_projs.append(__UpperCamelCase )
self.out_layers.append(nn.Linear(__UpperCamelCase , __UpperCamelCase ) )
else:
for i in range(len(self.cutoffs ) ):
UpperCamelCase_,UpperCamelCase_ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCamelCase_ : Dict = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(__UpperCamelCase , __UpperCamelCase ) ) )
self.out_layers.append(nn.Linear(__UpperCamelCase , r_idx - l_idx ) )
UpperCamelCase_ : Any = keep_order
def A_ (self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
if proj is None:
UpperCamelCase_ : Optional[int] = nn.functional.linear(__UpperCamelCase , __UpperCamelCase , bias=__UpperCamelCase )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
UpperCamelCase_ : Optional[Any] = nn.functional.linear(__UpperCamelCase , proj.t().contiguous() )
UpperCamelCase_ : Optional[int] = nn.functional.linear(__UpperCamelCase , __UpperCamelCase , bias=__UpperCamelCase )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
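# Editor's note (shape sketch, not part of the original file): when a projection is
# used, logit = (hidden @ proj) @ weight.T + bias, i.e. a [batch, d_proj] hidden state
# is first mapped to [batch, d_embed] and then scored against the [n_tokens, d_embed]
# embedding matrix to give [batch, n_tokens] logits.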
def A_ (self , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=False ) -> List[Any]:
if labels is not None:
# Shift so that tokens < n predict n
UpperCamelCase_ : str = hidden[..., :-1, :].contiguous()
UpperCamelCase_ : List[Any] = labels[..., 1:].contiguous()
UpperCamelCase_ : Dict = hidden.view(-1 , hidden.size(-1 ) )
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            # Single-cluster case: a plain projected softmax over the whole vocabulary.
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                # Positions labelled -100 are ignored, following the usual loss-masking convention.
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases for the head and each tail cluster
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            # Walk over each cluster's token range and fill in its log-probabilities.
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out
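    # log_prob() below mirrors the clustered forward pass but always returns the full
    # (batch, n_token) log-probability matrix instead of per-label losses.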
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases, exactly as in forward()
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
| 635
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix="##", **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, clean_text=clean_text, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, wordpieces_prefix=wordpieces_prefix, **kwargs)

        # Keep the backend normalizer in sync with the arguments passed to this tokenizer.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        # The leading <cls> token gets its own token type id; the rest follow the usual 0/1 split.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
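# A minimal usage sketch (assumes network access to fetch the hosted files):
#     tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
#     enc = tokenizer("Hello world")
#     enc["token_type_ids"][0] == 2  # the <cls> slot uses cls_token_type_id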
| 635
| 1
|
"""simple docstring"""
def __A ( a_ :int) -> List[Any]:
__a : Dict = abs(snake_case__)
__a : Optional[int] = 0
while n > 0:
res += n % 10
n //= 10
return res
def __A ( a_ :int) -> int:
__a : str = abs(snake_case__)
return n if n < 10 else n % 10 + sum_of_digits(n // 10)
def __A ( a_ :int) -> Any:
return sum(int(snake_case__) for c in str(abs(snake_case__)))
def __A ( ) -> Any:
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(a_ :Callable , a_ :int) -> None:
__a : Union[str, Any] = F"""{func.__name__}({value})"""
__a : int = timeit(F"""__main__.{call}""" , setup='''import __main__''')
print(F"""{call:56} = {func(snake_case__)} -- {timing:.4f} seconds""")
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(snake_case__ , snake_case__)
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 700
|
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}


class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
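# A minimal usage sketch: DetrConfig() builds the default resnet50-backed config, and the
# attribute_map above means config.hidden_size transparently reads config.d_model (256).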
| 101
| 0
|
"""simple docstring"""
def a_ ( ):
return 1
def a_ ( lowercase__ :Optional[Any] ):
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def a_ ( lowercase__ :int ):
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(a_ )
def a_ ( lowercase__ :Dict ):
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(a_ )
def a_ ( lowercase__ :List[str] ):
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(a_ )
def a_ ( lowercase__ :Optional[Any] ):
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(a_ )
def a_ ( lowercase__ :Dict ):
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(a_ )
def a_ ( lowercase__ :Tuple ):
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(a_ )
def a_ ( lowercase__ :List[Any] = 200 ):
return two_pound(a_ )
if __name__ == "__main__":
print(solution(int(input().strip())))
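# For the standard Project Euler #31 target of 200 pence, solution() returns 73682.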
| 281
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        # A tiny UNet so the fast test stays cheap.
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
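# The integration test below runs the real google/ncsnpp-church-256 checkpoint end to end,
# which is why it is gated behind @slow and @require_torch.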
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 318
| 0
|
from __future__ import annotations
import requests
valid_terms = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch posts from a subreddit, optionally restricted to the fields in wanted_data."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
| 711
|
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )

    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )

    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )

    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased'].`"
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
| 109
| 0
|
'''simple docstring'''
def multiplicative_persistence(num: int) -> int:
    """
    Count how many times the digits of num must be multiplied together
    before a single digit remains.

    >>> multiplicative_persistence(217)
    2
    """
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """
    Count how many times the digits of num must be summed
    before a single digit remains.

    >>> additive_persistence(199)
    3
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
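# Worked example: multiplicative_persistence(39) == 3 (39 -> 27 -> 14 -> 4),
# while additive_persistence(199) == 3 (199 -> 19 -> 10 -> 1).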
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 11
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}


class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, bos_token="[CLS]", eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable, so drop it and reload it in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        """Tokenize a string into SentencePiece pieces."""
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 78
|
"""simple docstring"""
def lowerCamelCase__ ( ) -> list[list[int]]:
"""simple docstring"""
return [list(range(10_00 - i, -10_00 - i, -1 ) ) for i in range(10_00 )]
_a = generate_large_matrix()
_a = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def lowerCamelCase__ ( __snake_case ) -> None:
"""simple docstring"""
assert all(row == sorted(__snake_case, reverse=__snake_case ) for row in grid )
assert all(list(__snake_case ) == sorted(__snake_case, reverse=__snake_case ) for col in zip(*__snake_case ) )
def lowerCamelCase__ ( __snake_case ) -> int:
"""simple docstring"""
_UpperCamelCase = 0
_UpperCamelCase = len(__snake_case ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
_UpperCamelCase = (left + right) // 2
_UpperCamelCase = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
_UpperCamelCase = mid + 1
else:
_UpperCamelCase = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(__snake_case )
def lowerCamelCase__ ( __snake_case ) -> int:
"""simple docstring"""
_UpperCamelCase = 0
_UpperCamelCase = len(grid[0] )
for i in range(len(__snake_case ) ):
_UpperCamelCase = find_negative_index(grid[i][:bound] )
total += bound
return (len(__snake_case ) * len(grid[0] )) - total
def lowerCamelCase__ ( __snake_case ) -> int:
"""simple docstring"""
return len([number for row in grid for number in row if number < 0] )
def lowerCamelCase__ ( __snake_case ) -> int:
"""simple docstring"""
_UpperCamelCase = 0
for row in grid:
for i, number in enumerate(__snake_case ):
if number < 0:
total += len(__snake_case ) - i
break
return total
def lowerCamelCase__ ( ) -> None:
"""simple docstring"""
from timeit import timeit
print('''Running benchmarks''' )
_UpperCamelCase = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
_UpperCamelCase = timeit(F'''{func}(grid=grid)''', setup=__snake_case, number=5_00 )
print(F'''{func}() took {time:0.4f} seconds''' )
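# The binary-search variant is O(n log m) versus O(n * m) for the brute force,
# which is what the sample timings in the comments above reflect.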
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 78
| 1
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
def relu(vector: list[float]):
    """Apply the element-wise maximum against 0, i.e. max(0, x) for every x."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
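# np.maximum broadcasts the scalar 0 against the input, so relu() also accepts
# plain Python lists and returns an ndarray.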
| 465
|
"""simple docstring"""
def __magic_name__ ( UpperCamelCase : int , UpperCamelCase : list[int] , UpperCamelCase : int ) -> int:
def count_of_possible_combinations(UpperCamelCase : int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(UpperCamelCase )
def __magic_name__ ( UpperCamelCase : int , UpperCamelCase : list[int] , UpperCamelCase : int ) -> int:
def count_of_possible_combinations_with_dp_array(
UpperCamelCase : int , UpperCamelCase : list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
a__ = sum(
count_of_possible_combinations_with_dp_array(target - item , UpperCamelCase )
for item in array )
a__ = answer
return answer
a__ = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(UpperCamelCase , UpperCamelCase )
def __magic_name__ ( UpperCamelCase : int , UpperCamelCase : list[int] , UpperCamelCase : int ) -> int:
a__ = [0] * (target + 1)
a__ = 1
for i in range(1 , target + 1 ):
for j in range(UpperCamelCase ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
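# Worked check for the values above: with array=[1, 2, 5] and target=5 the dp table is
# [1, 1, 2, 3, 5, 9], so every variant prints 9.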
| 273
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 33
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that constructs a dataset from a Spark DataFrame."""

    def __init__(self, df: pyspark.sql.DataFrame, split: Optional[NamedSplit] = None, features: Optional[Features] = None, streaming: bool = True, cache_dir: str = None, keep_in_memory: bool = False, working_dir: str = None, load_from_cache_file: bool = True, file_format: str = "arrow", **kwargs):
        super().__init__(split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs)
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
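# A minimal usage sketch (assumes an active SparkSession and a DataFrame `df`):
#     ds = SparkDatasetReader(df, streaming=False).read()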
| 33
| 1
|
import math
import unittest
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        # Negative inputs violate the assertion inside is_prime.
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 190
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # dc.token_ids is a list of integers and may only be initialised from integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # A constraint that is a complete subset of another is ambiguous, so it is rejected.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 604
| 0
|
"""simple docstring"""
def lowercase ( _snake_case : int ) ->bool:
"""simple docstring"""
return number & 1 == 0
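# The bitwise check reads only the lowest bit (1 for odd, 0 for even), avoiding a modulo.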
if __name__ == "__main__":
import doctest
doctest.testmod()
| 229
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 229
| 1
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 32
|
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def _A( lowerCAmelCase ):
A__ : int = fname.split(os.path.sep )[-1]
return re.search(r"""^(.*)_\d+\.jpg$""" , lowerCAmelCase ).groups()[0]
class __UpperCAmelCase (__A ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=None , snake_case_=None ):
'''simple docstring'''
A__ : Dict = file_names
A__ : str = image_transform
A__ : Dict = label_to_id
def __len__( self ):
'''simple docstring'''
return len(self.file_names )
def __getitem__( self , snake_case_ ):
'''simple docstring'''
A__ : Optional[Any] = self.file_names[idx]
A__ : Optional[Any] = PIL.Image.open(snake_case_ )
A__ : str = raw_image.convert("""RGB""" )
if self.image_transform is not None:
A__ : Optional[int] = self.image_transform(snake_case_ )
A__ : Dict = extract_label(snake_case_ )
if self.label_to_id is not None:
A__ : List[Any] = self.label_to_id[label]
return {"image": image, "label": label}
def _A( lowerCAmelCase , lowerCAmelCase ):
# Initialize accelerator
if args.with_tracking:
A__ : List[Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir )
else:
A__ : Optional[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ : List[Any] = config["""lr"""]
A__ : Any = int(config["""num_epochs"""] )
A__ : List[Any] = int(config["""seed"""] )
A__ : Tuple = int(config["""batch_size"""] )
A__ : List[str] = config["""image_size"""]
if not isinstance(lowerCAmelCase , (list, tuple) ):
A__ : List[str] = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , """isdigit""" ):
if args.checkpointing_steps == "epoch":
A__ : List[Any] = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
A__ : int = int(args.checkpointing_steps )
else:
raise ValueError(
F'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
A__ : Any = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
A__ : int = os.path.split(lowerCAmelCase )[-1].split(""".""" )[0]
accelerator.init_trackers(lowerCAmelCase , lowerCAmelCase )
# Grab all the image filenames
A__ : Union[str, Any] = [os.path.join(args.data_dir , lowerCAmelCase ) for fname in os.listdir(args.data_dir ) if fname.endswith(""".jpg""" )]
# Build the label correspondences
A__ : int = [extract_label(lowerCAmelCase ) for fname in file_names]
A__ : Dict = list(set(lowerCAmelCase ) )
id_to_label.sort()
A__ : int = {lbl: i for i, lbl in enumerate(lowerCAmelCase )}
# Set the seed before splitting the data.
np.random.seed(lowerCAmelCase )
torch.manual_seed(lowerCAmelCase )
torch.cuda.manual_seed_all(lowerCAmelCase )
# Split our filenames between train and validation
A__ : str = np.random.permutation(len(lowerCAmelCase ) )
A__ : Optional[int] = int(0.8 * len(lowerCAmelCase ) )
A__ : Union[str, Any] = random_perm[:cut]
A__ : Optional[int] = random_perm[cut:]
# For training we use a simple RandomResizedCrop
A__ : Union[str, Any] = Compose([RandomResizedCrop(lowerCAmelCase , scale=(0.5, 1.0) ), ToTensor()] )
A__ : Dict = PetsDataset(
[file_names[i] for i in train_split] , image_transform=lowerCAmelCase , label_to_id=lowerCAmelCase )
# For evaluation, we use a deterministic Resize
A__ : Optional[int] = Compose([Resize(lowerCAmelCase ), ToTensor()] )
A__ : Optional[Any] = PetsDataset([file_names[i] for i in eval_split] , image_transform=lowerCAmelCase , label_to_id=lowerCAmelCase )
# Instantiate dataloaders.
A__ : List[Any] = DataLoader(lowerCAmelCase , shuffle=lowerCAmelCase , batch_size=lowerCAmelCase , num_workers=4 )
A__ : str = DataLoader(lowerCAmelCase , shuffle=lowerCAmelCase , batch_size=lowerCAmelCase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ : Dict = create_model("""resnet50d""" , pretrained=lowerCAmelCase , num_classes=len(lowerCAmelCase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ : Tuple = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
A__ : Any = False
for param in model.get_classifier().parameters():
A__ : Dict = True
# We normalize the batches of images to be a bit faster.
A__ : List[Any] = torch.tensor(model.default_cfg["""mean"""] )[None, :, None, None].to(accelerator.device )
A__ : int = torch.tensor(model.default_cfg["""std"""] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
A__ : str = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
A__ : Optional[int] = OneCycleLR(optimizer=lowerCAmelCase , max_lr=lowerCAmelCase , epochs=lowerCAmelCase , steps_per_epoch=len(lowerCAmelCase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ : Dict = accelerator.prepare(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
A__ : str = 0
# We also need to keep track of the starting epoch so files are named properly
A__ : Union[str, Any] = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
accelerator.load_state(args.resume_from_checkpoint )
A__ : Dict = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
A__ : Tuple = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
A__ : Optional[Any] = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
A__ : Optional[Any] = os.path.splitext(lowerCAmelCase )[0]
if "epoch" in training_difference:
A__ : Optional[Any] = int(training_difference.replace("""epoch_""" , """""" ) ) + 1
A__ : int = None
else:
A__ : Optional[Any] = int(training_difference.replace("""step_""" , """""" ) )
A__ : Union[str, Any] = resume_step // len(lowerCAmelCase )
resume_step -= starting_epoch * len(lowerCAmelCase )
# Now we train the model
for epoch in range(lowerCAmelCase , lowerCAmelCase ):
model.train()
if args.with_tracking:
A__ : List[Any] = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
A__ : int = accelerator.skip_first_batches(lowerCAmelCase , lowerCAmelCase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
A__ : Optional[int] = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
A__ : Union[str, Any] = {k: v.to(accelerator.device ) for k, v in batch.items()}
A__ : Any = (batch["""image"""] - mean) / std
A__ : Tuple = model(lowerCAmelCase )
A__ : int = torch.nn.functional.cross_entropy(lowerCAmelCase , batch["""label"""] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(lowerCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(lowerCAmelCase , lowerCAmelCase ):
A__ : Union[str, Any] = F'''step_{overall_step}'''
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
A__ : List[str] = os.path.join(args.output_dir , lowerCAmelCase )
accelerator.save_state(lowerCAmelCase )
model.eval()
A__ : Any = 0
A__ : Tuple = 0
for step, batch in enumerate(lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
A__ : List[Any] = {k: v.to(accelerator.device ) for k, v in batch.items()}
A__ : Any = (batch["""image"""] - mean) / std
with torch.no_grad():
A__ : Optional[int] = model(lowerCAmelCase )
A__ : Tuple = outputs.argmax(dim=-1 )
A__ , A__ : Tuple = accelerator.gather_for_metrics((predictions, batch["""label"""]) )
A__ : int = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
A__ : List[Any] = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}: {100 * eval_metric:.2f}''' )
if args.with_tracking:
accelerator.log(
{
"""accuracy""": 100 * eval_metric,
"""train_loss""": total_loss.item() / len(lowerCAmelCase ),
"""epoch""": epoch,
} , step=lowerCAmelCase , )
if checkpointing_steps == "epoch":
A__ : Any = F'''epoch_{epoch}'''
if args.output_dir is not None:
A__ : Any = os.path.join(args.output_dir , lowerCAmelCase )
accelerator.save_state(lowerCAmelCase )
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument("--data_dir" , required=True , help="The data folder on disk." )
    parser.add_argument("--fp16" , action="store_true" , help="If passed, will use FP16 training." )
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    parser.add_argument(
        "--checkpointing_steps" , type=str , default=None , help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch." , )
    parser.add_argument(
        "--output_dir" , type=str , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
    parser.add_argument(
        "--resume_from_checkpoint" , type=str , default=None , help="If the training should continue from a checkpoint folder." , )
    parser.add_argument(
        "--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
    parser.add_argument(
        "--project_dir" , type=str , default="logs" , help="Location on where to store experiment tracking logs and relevant project information" , )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config , args )
if __name__ == "__main__":
main()
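# Example invocation (a sketch; the script and data paths are hypothetical,
# the flags match the parser above):
#   accelerate launch cv_example.py --data_dir ./pet_images \
#       --mixed_precision fp16 --checkpointing_steps epoch --with_tracking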
| 363
| 0
|
from __future__ import annotations
from collections import namedtuple
def electric_power( voltage: float , current: float , power: float ):
    result = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
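# Worked examples (values follow P = V * I; exactly one argument must be 0):
#   electric_power(voltage=0, current=2, power=10)  -> result(name='voltage', value=5.0)
#   electric_power(voltage=2, current=0, power=4)   -> result(name='current', value=2.0)
#   electric_power(voltage=2, current=2.5, power=0) -> result(name='power', value=5.0)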
| 249
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , scope=None , range_bbox=1000 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config( self ):
        return LiltConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        model = LiltModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def create_and_check_for_token_classification( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_question_answering( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        model = LiltForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': LiltModel,
            'question-answering': LiltForQuestionAnswering,
            'text-classification': LiltForSequenceClassification,
            'token-classification': LiltForTokenClassification,
            'zero-shot': LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self , pipeline_test_case_name , config_class , model_architecture , tokenizer_name , processor_name ):
        return True

    def setUp( self ):
        self.model_tester = LiltModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@slow
class LiltModelIntegrationTest( unittest.TestCase ):
    def test_inference_no_head( self ):
        model = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(torch_device )
        input_ids = torch.tensor([[1, 2]] , device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids , bbox=bbox )
        expected_shape = torch.Size([1, 2, 768] )
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=torch_device , )
        self.assertTrue(outputs.last_hidden_state.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1e-3 ) )
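# Minimal standalone sketch of the same inference path the test exercises
# (checkpoint name taken from the test above; the token ids are illustrative):
#
#   model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
#   input_ids = torch.tensor([[1, 2]])
#   bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])  # one [x0, y0, x1, y1] box per token
#   with torch.no_grad():
#       last_hidden_state = model(input_ids=input_ids, bbox=bbox).last_hidden_state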
| 249
| 1
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}
class Blip2VisionConfig( PretrainedConfig ):
    model_type = 'blip_2_vision_model'

    def __init__( self , hidden_size=1408 , intermediate_size=6144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=0.00001 , attention_dropout=0.0 , initializer_range=1e-10 , qkv_bias=True , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type" ) == "blip-2":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class Blip2QFormerConfig( PretrainedConfig ):
    model_type = 'blip_2_qformer'

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1408 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type" ) == "blip-2":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class Blip2Config( PretrainedConfig ):
    model_type = 'blip-2'
    is_composition = True

    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ):
        """simple docstring"""
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." )
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
        self.vision_config = Blip2VisionConfig(**vision_config )
        self.qformer_config = Blip2QFormerConfig(**qformer_config )
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs( cls , vision_config , qformer_config , text_config , **kwargs , ):
        """simple docstring"""
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )

    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
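# Composition sketch: the constructor above ties the Q-Former's cross-attention
# width to the vision tower, so a default-constructed config is self-consistent.
#
#   config = Blip2Config(num_query_tokens=32)
#   assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size
#   assert config.text_config.model_type == "opt"  # default text backbone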
| 245
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['names', 'prefix']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['encoding_errors', 'on_bad_lines']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['date_format']
@dataclass
class CsvConfig( datasets.BuilderConfig ):
    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__( self ):
        """simple docstring"""
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs( self ):
        """simple docstring"""
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv( datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info( self ):
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features )

    def _split_generators( self , dl_manager ):
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
        return splits

    def _cast_table( self , pa_table ):
        """simple docstring"""
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table , schema )
        return pa_table

    def _generate_tables( self , files ):
        """simple docstring"""
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table = pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(f'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
                raise
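# Usage sketch: this packaged module is what `load_dataset("csv", ...)` dispatches
# to; the file path below is hypothetical.
#
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files="my_table.csv", sep=",")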
| 245
| 1
|
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size( features: Features ):
    '''simple docstring'''
    batch_size = np.inf

    def set_batch_size(feature: FeatureType ) -> None:
        nonlocal batch_size
        if isinstance(feature , Image ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(feature , Audio ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(feature , Value ) and feature.dtype == "binary":
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )

    _visit(features , set_batch_size )
    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__( self , path_or_paths , split=None , features=None , cache_dir=None , keep_in_memory=False , streaming=False , num_proc=None , **kwargs , ):
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["""parquet"""][1]
        self.builder = Parquet(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , hash=hash , **kwargs , )

    def read( self ):
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class ParquetDatasetWriter:
    def __init__( self , dataset , path_or_buf , batch_size=None , **parquet_writer_kwargs , ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features )
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write( self ):
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with open(self.path_or_buf , """wb+""" ) as buffer:
                written = self._write(file_obj=buffer , batch_size=batch_size , **self.parquet_writer_kwargs )
        else:
            written = self._write(file_obj=self.path_or_buf , batch_size=batch_size , **self.parquet_writer_kwargs )
        return written

    def _write( self , file_obj , batch_size , **parquet_writer_kwargs ):
        written = 0
        _ = parquet_writer_kwargs.pop("""path_or_buf""" , None )
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj , schema=schema , **parquet_writer_kwargs )
        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating parquet from Arrow format""" , ):
            batch = query_table(
                table=self.dataset._data , key=slice(offset , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
            writer.write_table(batch )
            written += batch.nbytes
        writer.close()
        return written
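# Writer usage sketch (in-memory dataset; the output filename is hypothetical):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"a": [1, 2, 3]})
#   n_bytes = ParquetDatasetWriter(ds, "out.parquet").write()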
| 713
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup( self ):
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels , dtype=self.dtype )

    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        output_states = ()
        for resnet, attn in zip(self.resnets , self.attentions ):
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup( self ):
        resnets = []
        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels , dtype=self.dtype )

    def __call__( self , hidden_states , temb , deterministic=True ):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup( self ):
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels , dtype=self.dtype )

    def __call__( self , hidden_states , res_hidden_states_tuple , temb , encoder_hidden_states , deterministic=True ):
        for resnet, attn in zip(self.resnets , self.attentions ):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states )
        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup( self ):
        resnets = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels , dtype=self.dtype )

    def __call__( self , hidden_states , res_hidden_states_tuple , temb , deterministic=True ):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states )
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup( self ):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
        ]
        attentions = []
        for _ in range(self.num_layers ):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        self.attentions = attentions

    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        hidden_states = self.resnets[0](hidden_states , temb )
        for attn, resnet in zip(self.attentions , self.resnets[1:] ):
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        return hidden_states
| 80
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor( BaseImageProcessor ):
    """simple docstring"""

    model_input_names = ["""pixel_values"""]

    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , crop_pct: float = None , resample: PILImageResampling = PILImageResampling.BILINEAR , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 384}
        size = get_size_dict(size , default_to_square=False )
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , crop_pct: float , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}" )
        shortest_edge = size['shortest_edge']
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct )
            resize_size = get_resize_output_image_size(image , size=resize_shortest_edge , default_to_square=False )
            image = resize(image=image , size=resize_size , resample=resample , data_format=data_format , **kwargs )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image , size=(shortest_edge, shortest_edge) , data_format=data_format , **kwargs )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image , size=(shortest_edge, shortest_edge) , resample=resample , data_format=data_format , **kwargs )

    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , crop_pct: float = None , resample: PILImageResampling = None , do_rescale: bool = None , rescale_factor: float = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , crop_pct=crop_pct , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
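# Preprocessing sketch (the input image here is synthetic; the output is ready
# to feed to a ConvNext model):
#
#   import numpy as np
#   from PIL import Image
#   processor = ConvNextImageProcessor()
#   img = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
#   pixel_values = processor(images=img, return_tensors="np")["pixel_values"]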
| 380
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ibert'] = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 459
| 0
|
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
    )
    parser.add_argument(
        """--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
    )
    parser.add_argument(
        """--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
    )
    parser.add_argument("""--vocab_size""", default=30522, type=int)
    args = parser.parse_args()

    logger.info(F"""Loading data from {args.data_file}""")
    with open(args.data_file, """rb""") as fp:
        data = pickle.load(fp)

    logger.info("""Counting occurrences for MLM.""")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
logger.info(F"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
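# Invocation sketch (the script name is an assumption; the paths are the
# defaults declared above):
#   python token_counts.py --data_file data/dump.bert-base-uncased.pickle \
#       --token_counts_dump data/token_counts.bert-base-uncased.pickle --vocab_size 30522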
| 392
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = '''vit_msn'''

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-06 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
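# Usage sketch (the override is hypothetical; unlisted fields keep the
# defaults above):
#   config = ViTMSNConfig(patch_size=32)
#   assert config.hidden_size == 768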
| 392
| 1
|
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class A__ :
"""simple docstring"""
pass
| 37
|
"""simple docstring"""
import operator
def strand_sort(arr: list , reverse: bool = False , solution: list = None ) -> list:
    '''simple docstring'''
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0 )]
    for i, item in enumerate(arr ):
        if _operator(item , sublist[-1] ):
            sublist.append(item )
            arr.pop(i )
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist )
    else:
        while sublist:
            item = sublist.pop(0 )
            for i, xx in enumerate(solution ):
                if not _operator(item , xx ):
                    solution.insert(i , item )
                    break
            else:
                solution.append(item )
    strand_sort(arr , reverse , solution )
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 115
| 0
|
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters( with_config=True ):
"""simple docstring"""
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class TestDatasetOnHfGcp(TestCase ):
    dataset = None
    config_name = None

    def test_dataset_info_available( self , dataset , config_name ):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path , dataset=True )
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            dataset_info_url = '/'.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , '/' ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs( tmp_path_factory ):
    """simple docstring"""
    tmp_dir = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
    dataset_module = dataset_module_factory('wikipedia' , cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir , config_name='20220301.frr' , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def test_as_streaming_dataset( tmp_path ):
    """simple docstring"""
    dataset_module = dataset_module_factory('wikipedia' , cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path , dataset=True )
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path , config_name='20220301.frr' , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds['train'] , IterableDataset )
    assert next(iter(ds['train'] ) )
| 711
|
"""simple docstring"""
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list ) -> int:
    """simple docstring"""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod()
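# Worked example: maximum_non_adjacent_sum([1, 2, 4, 5, 9, 10]) == 17,
# picking the non-adjacent elements 2 + 5 + 10.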
| 91
| 0
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments ):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__( self , **kwargs ):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
                logger.warning(
                    F'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or'''
                    F''' {positive_arg}={kwargs[positive_arg]}''' )
        self.tpu_name = kwargs.pop('tpu_name' , self.tpu_name )
        self.device_idx = kwargs.pop('device_idx' , self.device_idx )
        self.eager_mode = kwargs.pop('eager_mode' , self.eager_mode )
        self.use_xla = kwargs.pop('use_xla' , self.use_xla )
        super().__init__(**kwargs )

    tpu_name: str = field(
        default=None , metadata={"help": "Name of TPU"} , )
    device_idx: int = field(
        default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
    eager_mode: bool = field(default=False , metadata={"help": "Benchmark models in eager model."} )
    use_xla: bool = field(
        default=False , metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        } , )

    @cached_property
    def _setup_tpu( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self , ['tf'] )
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self , ['tf'] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            strategy = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' )
                strategy = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' )
            else:
                tf.config.set_visible_devices([] , 'GPU' )  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' )
        return strategy

    @property
    def is_tpu( self ) -> bool:
        requires_backends(self , ['tf'] )
        return self._setup_tpu is not None

    @property
    def strategy( self ) -> "tf.distribute.Strategy":
        requires_backends(self , ['tf'] )
        return self._setup_strategy

    @property
    def gpu_list( self ):
        requires_backends(self , ['tf'] )
        return tf.config.list_physical_devices('GPU' )

    @property
    def n_gpu( self ) -> int:
        requires_backends(self , ['tf'] )
        if self.cuda:
            return len(self.gpu_list )
        return 0

    @property
    def is_gpu( self ) -> bool:
        return self.n_gpu > 0
| 328
|
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum ):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin ):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained('gpt2' )
        self.wp_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.' )
        if images is not None:
            inputs = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None:
            encodings = self.char_tokenizer(text , return_tensors=return_tensors , **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)
        char_strs, char_scores = self._decode_helper(char_preds, 'char')
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, 'bpe')
        wp_strs, wp_scores = self._decode_helper(wp_preds, 'wp')
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])
        out = {}
        out['generated_text'] = final_strs
        out['scores'] = final_scores
        out['char_preds'] = char_strs
        out['bpe_preds'] = bpe_strs
        out['wp_preds'] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = '[s]'
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = '#'
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = '[SEP]'
        else:
            raise ValueError(f'Format {format} is not supported.')
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)
        return dec_strs, conf_scores
    def char_decode(self, sequences):
        decode_strs = [seq.replace(' ', '') for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs
    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)
    def wp_decode(self, sequences):
        decode_strs = [seq.replace(' ', '') for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
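# Decoding flow sketch (illustrative; `model` and `processor` below are hypothetical):
# the three output heads of MGP-STR are fused by `batch_decode`, which keeps the
# best-scoring string per sample.
# logits = model(pixel_values).logits            # (char_logits, bpe_logits, wp_logits)
# outputs = processor.batch_decode(logits)
# outputs['generated_text']                      # best string per image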
| 328
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase :Any = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class ASTConfig(PretrainedConfig):
    model_type = 'audio-spectrogram-transformer'
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
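# Quick sanity sketch (illustrative): the defaults reproduce the base AST setup.
# config = ASTConfig()
# (config.num_mel_bins, config.max_length, config.patch_size)  # -> (128, 1024, 16)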
| 686
|
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = '''__dummy_dataset1__'''
DATASET_LOADING_SCRIPT_CODE = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f'{script_name}.py'
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
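# Consumption sketch (hypothetical test; `datasets.load_dataset_builder` is the real API):
# def test_dummy_dataset(dataset_loading_script_dir):
#     builder = datasets.load_dataset_builder(dataset_loading_script_dir)
#     assert "tokens" in builder.info.features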
| 686
| 1
|
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange('AB', 'XYZ'), end=' ')
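    # For the inputs above this prints "AXBYZ": characters are interleaved
    # until the shorter string is exhausted, then the remainder is appended.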
| 57
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ('foo.json',)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)
        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])
        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained('gpt2')
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)
    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            'max_new_tokens': 1024,
            'foo': 'bar',
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {'foo': 'bar'})
    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = 'bar'
        with tempfile.TemporaryDirectory('test-generation-config') as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, 'bar')
        generation_config_from_model = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config_from_model, 'foo')  # no new kwargs should be initialized if from config
    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, )
        config.push_to_hub('test-generation-config', use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(f'{USER}/test-generation-config')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='test-generation-config')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id='test-generation-config', push_to_hub=True, use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(f'{USER}/test-generation-config')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, )
        config.push_to_hub('valid_org/test-generation-config-org', use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained('valid_org/test-generation-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-generation-config-org')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id='valid_org/test-generation-config-org', push_to_hub=True, use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained('valid_org/test-generation-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 658
| 0
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Any = logging.get_logger(__name__)
__lowerCamelCase : Any = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = '''unispeech'''
def __init__( self :Union[str, Any] , lowerCAmelCase__ :Any=32 , lowerCAmelCase__ :Tuple=768 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :List[str]=3_072 , lowerCAmelCase__ :Dict="gelu" , lowerCAmelCase__ :Optional[int]=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :List[str]=0.0 , lowerCAmelCase__ :Any=0.1 , lowerCAmelCase__ :Optional[int]=0.1 , lowerCAmelCase__ :str=0.0_2 , lowerCAmelCase__ :Tuple=1E-5 , lowerCAmelCase__ :Optional[Any]="group" , lowerCAmelCase__ :Optional[int]="gelu" , lowerCAmelCase__ :Optional[Any]=(512, 512, 512, 512, 512, 512, 512) , lowerCAmelCase__ :Optional[int]=(5, 2, 2, 2, 2, 2, 2) , lowerCAmelCase__ :Union[str, Any]=(10, 3, 3, 3, 3, 2, 2) , lowerCAmelCase__ :Dict=False , lowerCAmelCase__ :Tuple=128 , lowerCAmelCase__ :Optional[Any]=16 , lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :List[str]=0.0_5 , lowerCAmelCase__ :Any=10 , lowerCAmelCase__ :Dict=2 , lowerCAmelCase__ :List[Any]=0.0 , lowerCAmelCase__ :Any=10 , lowerCAmelCase__ :str=0 , lowerCAmelCase__ :List[str]=320 , lowerCAmelCase__ :Optional[Any]=2 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :List[Any]=100 , lowerCAmelCase__ :Tuple=256 , lowerCAmelCase__ :Optional[int]=256 , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :Optional[Any]="mean" , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :List[str]=256 , lowerCAmelCase__ :Dict=80 , lowerCAmelCase__ :Tuple=0 , lowerCAmelCase__ :List[Any]=1 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :int=0.5 , **lowerCAmelCase__ :Optional[Any] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ )
snake_case_ : int = hidden_size
snake_case_ : List[Any] = feat_extract_norm
snake_case_ : List[str] = feat_extract_activation
snake_case_ : int = list(lowerCAmelCase__ )
snake_case_ : Dict = list(lowerCAmelCase__ )
snake_case_ : Any = list(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = conv_bias
snake_case_ : int = num_conv_pos_embeddings
snake_case_ : Optional[Any] = num_conv_pos_embedding_groups
snake_case_ : Any = len(self.conv_dim )
snake_case_ : List[str] = num_hidden_layers
snake_case_ : List[Any] = intermediate_size
snake_case_ : int = hidden_act
snake_case_ : Optional[int] = num_attention_heads
snake_case_ : int = hidden_dropout
snake_case_ : Dict = attention_dropout
snake_case_ : Union[str, Any] = activation_dropout
snake_case_ : Dict = feat_proj_dropout
snake_case_ : Optional[Any] = final_dropout
snake_case_ : Tuple = layerdrop
snake_case_ : str = layer_norm_eps
snake_case_ : Dict = initializer_range
snake_case_ : List[Any] = num_ctc_classes
snake_case_ : Optional[int] = vocab_size
snake_case_ : Any = do_stable_layer_norm
snake_case_ : Any = use_weighted_layer_sum
snake_case_ : Union[str, Any] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case_ : int = apply_spec_augment
snake_case_ : Tuple = mask_time_prob
snake_case_ : str = mask_time_length
snake_case_ : List[Any] = mask_time_min_masks
snake_case_ : str = mask_feature_prob
snake_case_ : Optional[int] = mask_feature_length
snake_case_ : Any = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
snake_case_ : Optional[Any] = num_codevectors_per_group
snake_case_ : Dict = num_codevector_groups
snake_case_ : List[Any] = contrastive_logits_temperature
snake_case_ : Dict = feat_quantizer_dropout
snake_case_ : Dict = num_negatives
snake_case_ : Optional[Any] = codevector_dim
snake_case_ : Any = proj_codevector_dim
snake_case_ : str = diversity_loss_weight
# ctc loss
snake_case_ : Optional[Any] = ctc_loss_reduction
snake_case_ : List[Any] = ctc_zero_infinity
# pretraining loss
snake_case_ : Optional[int] = replace_prob
@property
def _A ( self :Tuple ) -> str:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
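    # Sketch: with the default conv_stride (5, 2, 2, 2, 2, 2, 2), the feature encoder
    # downsamples by 5 * 2**6 = 320 input samples per output frame, so
    # UniSpeechConfig().inputs_to_logits_ratio == 320.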
| 656
|
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
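# Example invocation via fire (file names are hypothetical):
#   python rouge_cli.py preds.txt targets.txt --save_path=rouge.json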
| 656
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int
class AdjacencyList:
    """Graph adjacency list restricted to edge weights 0 and 1 (for 0-1 BFS)."""
    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size
    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])
    @property
    def size(self):
        return self._size
    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).')
        self._graph[from_vertex].append(Edge(to_vertex, weight))
    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges go to the front of the deque, 1-weight edges to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.')
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
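    # Minimal usage sketch (illustrative): 0 -> 1 is free, 1 -> 2 costs 1,
    # so the shortest 0 -> 2 distance found by 0-1 BFS is 1.
    g = AdjacencyList(3)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    assert g.get_shortest_path(0, 2) == 1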
| 405
|
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
MODEL_MODES = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(self, hparams: argparse.Namespace, num_labels=None, mode="base", config=None, tokenizer=None, model=None, **config_kwargs, ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **({'num_labels': num_labels} if num_labels is not None else {}), cache_dir=cache_dir, **config_kwargs, )
        else:
            self.config = config
        extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))
        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=cache_dir, )
        else:
            self.tokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path, from_tf=bool('.ckpt' in self.hparams.model_name_or_path), config=self.config, cache_dir=cache_dir, )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)
    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps())
        scheduler = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
        return scheduler
    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)"""
        model = self.model
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                'weight_decay': self.hparams.weight_decay,
            },
            {
                'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                'weight_decay': 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False)
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)
    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)
    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader('train', self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError('You must implement this for your task')
    def train_dataloader(self):
        return self.train_loader
    def val_dataloader(self):
        return self.get_dataloader('dev', self.hparams.eval_batch_size, shuffle=False)
    def test_dataloader(self):
        return self.get_dataloader('test', self.hparams.eval_batch_size, shuffle=False)
    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            'cached_{}_{}_{}'.format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split('/'))).pop(),
                str(self.hparams.max_seq_length), ), )
@pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath('best_tfmr')
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
@staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            '--model_name_or_path', default=None, type=str, required=True, help='Path to pretrained model or model identifier from huggingface.co/models', )
        parser.add_argument(
            '--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
        parser.add_argument(
            '--tokenizer_name', default=None, type=str, help='Pretrained tokenizer name or path if not the same as model_name', )
        parser.add_argument(
            '--cache_dir', default=str(Path(__file__).parent / 'test_run' / 'cache'), type=str, help='Where do you want to store the pre-trained models downloaded from huggingface.co', )
        parser.add_argument(
            '--encoder_layerdrop', type=float, help='Encoder layer dropout probability (Optional). Goes into model.config', )
        parser.add_argument(
            '--decoder_layerdrop', type=float, help='Decoder layer dropout probability (Optional). Goes into model.config', )
        parser.add_argument(
            '--dropout', type=float, help='Dropout probability (Optional). Goes into model.config', )
        parser.add_argument(
            '--attention_dropout', type=float, help='Attention dropout probability (Optional). Goes into model.config', )
        parser.add_argument('--learning_rate', default=5e-5, type=float, help='The initial learning rate for Adam.')
        parser.add_argument(
            '--lr_scheduler', default='linear', choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help='Learning rate scheduler', )
        parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
        parser.add_argument('--adam_epsilon', default=1e-8, type=float, help='Epsilon for Adam optimizer.')
        parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
        parser.add_argument('--num_workers', default=4, type=int, help='kwarg passed to DataLoader')
        parser.add_argument('--num_train_epochs', dest='max_epochs', default=3, type=int)
        parser.add_argument('--train_batch_size', default=32, type=int)
        parser.add_argument('--eval_batch_size', default=32, type=int)
        parser.add_argument('--adafactor', action='store_true')
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class CheckParamCallback(pl.Callback):
    # print the parameters that received no gradient after the backward pass
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]['scheduler']
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info('***** Validation results *****')
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info('{} = {}\n'.format(key, str(metrics[key])))
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info('***** Test results *****')
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, 'test_results.txt')
        with open(output_test_results_file, 'w') as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info('{} = {}\n'.format(key, str(metrics[key])))
                    writer.write('{} = {}\n'.format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        '--output_dir', default=str(Path(__file__).parent / 'test_run' / 'model_checkpoints'), type=str, help='The output directory where the model predictions and checkpoints will be written.', )
    parser.add_argument(
        '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit', )
    parser.add_argument(
        '--fp16_opt_level', type=str, default='O2', help=(
            'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
            'See details at https://nvidia.github.io/apex/amp.html'
        ), )
    parser.add_argument('--n_tpu_cores', dest='tpu_cores', type=int)
    parser.add_argument('--max_grad_norm', dest='gradient_clip_val', default=1.0, type=float, help='Max gradient norm')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_predict', action='store_true', help='Whether to run predictions on the test set.')
    parser.add_argument(
        '--gradient_accumulation_steps', dest='accumulate_grad_batches', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.', )
    parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
    parser.add_argument(
        '--data_dir', default=str(Path(__file__).parent / 'test_run' / 'dummy-train-data'), type=str, help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.', )
def generic_train(model: BaseTransformer, args: argparse.Namespace, early_stopping_callback=None, logger=True, extra_callbacks=[], checkpoint_callback=None, logging_callback=None, **extra_train_kwargs, ):
    pl.seed_everything(args.seed)
    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix='checkpoint', monitor='val_loss', mode='min', save_top_k=1)
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()
    train_params = {}
    if args.fp16:
        train_params['precision'] = 16
    if args.gpus > 1:
        train_params['accelerator'] = 'auto'
        train_params['strategy'] = 'ddp'
    train_params['accumulate_grad_batches'] = args.accumulate_grad_batches
    train_params['profiler'] = None
    train_params['devices'] = 'auto'
    trainer = pl.Trainer.from_argparse_args(
        args, weights_summary=None, callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback], logger=logger, val_check_interval=1, num_sanity_val_steps=2, **train_params, )
    if args.do_train:
        trainer.fit(model)
    else:
        print('RAG modeling tests with new set functions successfuly executed!')
    return trainer
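# Wiring sketch (hypothetical task subclass `MyTaskTransformer`; not in the original file):
# parser = argparse.ArgumentParser()
# add_generic_args(parser, os.getcwd())
# parser = MyTaskTransformer.add_model_specific_args(parser, os.getcwd())
# args = parser.parse_args()
# model = MyTaskTransformer(args)
# trainer = generic_train(model, args)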
| 405
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_clipseg""": [
"""CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPSegConfig""",
"""CLIPSegTextConfig""",
"""CLIPSegVisionConfig""",
],
"""processing_clipseg""": ["""CLIPSegProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"""CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPSegModel""",
"""CLIPSegPreTrainedModel""",
"""CLIPSegTextModel""",
"""CLIPSegVisionModel""",
"""CLIPSegForImageSegmentation""",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
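# Lazy-loading sketch of the intended behavior: with `_LazyModule` installed in
# `sys.modules`, the torch-backed classes listed above are only imported on first
# attribute access, e.g.
# from transformers.models.clipseg import CLIPSegConfig  # no torch import triggered yet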
| 188
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('gelu')
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))
    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('gelu')
        gelu10 = get_activation('gelu_10')
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
    def test_get_activation(self):
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
        with self.assertRaises(KeyError):
            get_activation('bogus')
        with self.assertRaises(KeyError):
            get_activation(None)
    def test_activations_are_distinct_objects(self):
        act1 = get_activation('gelu')
        act1.a = 1
        act2 = get_activation('gelu')
        # `a` was set on act1 only; a fresh activation object must not carry it
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
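# Note (follow-up to the gelu_10 test above): `gelu_10` behaves like plain gelu but clips
# activations at 10.0, which is exactly what the clipped-mask comparison verifies.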
| 188
| 1
|