def kth_permutation(k: int, n: int) -> list[int]:
    """Return the k-th (0-indexed) lexicographic permutation of range(n)."""
    # Factorials from 1! up to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
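A quick sanity check of the helper above (a sketch; `kth_permutation` is the name used in this reconstruction, not in the original dump):

# k = 1 selects the second lexicographic permutation of range(3)
assert kth_permutation(1, 3) == [0, 2, 1]
# k = 0 is always the identity permutation
assert kth_permutation(0, 4) == [0, 1, 2, 3]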
from typing import Any


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
import os

import jsonlines
import numpy as np
from tqdm import tqdm


DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}


def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer


def get_context_and_ans(example, assertion=False):
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }


def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    # overlap will be of doc_stride - q_len
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }


def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example


def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop ~60% of the no-answer ("null") samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )


if __name__ == "__main__":
    from datasets import load_dataset

    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` in the interval [a, b] via the bisection method."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precision of 10^-7 is reached
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
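An illustrative second call, with an inline function whose root is known exactly (x² − 4 changes sign in [1, 1000] at x = 2):

print(bisection(lambda x: x**2 - 4, 1, 1000))  # ≈ 2.0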
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example: the molar mass must be in kg/mol to match the check above,
    # so nitrogen (N2) is 0.028 kg/mol, not 28 g/mol as in the original example
    temperature = 300
    molar_mass = 0.028
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
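As a hand-arithmetic sanity check on the example above: v_rms = sqrt(3 · R · T / M) = sqrt(3 · 8.3144598 · 300 / 0.028) ≈ 517 m/s, which matches the accepted RMS speed of N2 near room temperature.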
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : int = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class _UpperCAmelCase ( __a):
__a : int = """data2vec-vision"""
def __init__( self , _A=7_68 , _A=12 , _A=12 , _A=30_72 , _A="gelu" , _A=0.0 , _A=0.0 , _A=0.02 , _A=1e-12 , _A=2_24 , _A=16 , _A=3 , _A=False , _A=False , _A=False , _A=False , _A=0.1 , _A=0.1 , _A=True , _A=[3, 5, 7, 11] , _A=[1, 2, 3, 6] , _A=True , _A=0.4 , _A=2_56 , _A=1 , _A=False , _A=2_55 , **_A , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**_A )
_UpperCAmelCase : Union[str, Any] = hidden_size
_UpperCAmelCase : Union[str, Any] = num_hidden_layers
_UpperCAmelCase : Tuple = num_attention_heads
_UpperCAmelCase : Dict = intermediate_size
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : str = hidden_dropout_prob
_UpperCAmelCase : List[str] = attention_probs_dropout_prob
_UpperCAmelCase : str = initializer_range
_UpperCAmelCase : Any = layer_norm_eps
_UpperCAmelCase : str = image_size
_UpperCAmelCase : List[str] = patch_size
_UpperCAmelCase : str = num_channels
_UpperCAmelCase : Tuple = use_mask_token
_UpperCAmelCase : Union[str, Any] = use_absolute_position_embeddings
_UpperCAmelCase : Dict = use_relative_position_bias
_UpperCAmelCase : Tuple = use_shared_relative_position_bias
_UpperCAmelCase : List[Any] = layer_scale_init_value
_UpperCAmelCase : Tuple = drop_path_rate
_UpperCAmelCase : Optional[Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
_UpperCAmelCase : Union[str, Any] = out_indices
_UpperCAmelCase : List[Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
_UpperCAmelCase : Optional[int] = use_auxiliary_head
_UpperCAmelCase : List[Any] = auxiliary_loss_weight
_UpperCAmelCase : List[Any] = auxiliary_channels
_UpperCAmelCase : Tuple = auxiliary_num_convs
_UpperCAmelCase : Union[str, Any] = auxiliary_concat_input
_UpperCAmelCase : Optional[int] = semantic_loss_ignore_index
class _UpperCAmelCase ( __a):
__a : Tuple = version.parse("""1.11""")
@property
def __snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __snake_case ( self ) -> float:
'''simple docstring'''
return 1e-4
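A hedged instantiation sketch for the config above (`Data2VecVisionConfig` is the class name reconstructed here; defaults follow the signature):

config = Data2VecVisionConfig()
assert config.hidden_size == 768 and config.patch_size == 16
config_384 = Data2VecVisionConfig(image_size=384)  # any field can be overridden via kwargs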
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class UpperCamelCase ( ctypes.Structure ):
"""simple docstring"""
# _fields is a specific attr expected by ctypes
SCREAMING_SNAKE_CASE_ : int = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def __SCREAMING_SNAKE_CASE ( ):
if os.name == "nt":
_lowercase : Optional[int] = CursorInfo()
_lowercase : Any = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCAmelCase , ctypes.byref(__UpperCAmelCase ) )
_lowercase : Optional[int] = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCAmelCase , ctypes.byref(__UpperCAmelCase ) )
elif os.name == "posix":
sys.stdout.write("""\033[?25l""" )
sys.stdout.flush()
def __SCREAMING_SNAKE_CASE ( ):
if os.name == "nt":
_lowercase : str = CursorInfo()
_lowercase : Dict = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCAmelCase , ctypes.byref(__UpperCAmelCase ) )
_lowercase : Dict = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCAmelCase , ctypes.byref(__UpperCAmelCase ) )
elif os.name == "posix":
sys.stdout.write("""\033[?25h""" )
sys.stdout.flush()
@contextmanager
def __SCREAMING_SNAKE_CASE ( ):
try:
hide_cursor()
yield
finally:
show_cursor()
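A minimal usage sketch for the context manager above (illustrative only):

import time

with hide():       # cursor disappears while the block runs
    time.sleep(2)  # e.g. draw a menu or progress display here
# the cursor is restored on exit, even if an exception was raised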
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
_lowercase : Union[str, Any] = mf_knapsack(i - 1 , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
_lowercase : Tuple = max(
mf_knapsack(i - 1 , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , mf_knapsack(i - 1 , __UpperCAmelCase , __UpperCAmelCase , j - wt[i - 1] ) + val[i - 1] , )
_lowercase : int = val
return f[i][j]
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Any = [[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1 , n + 1 ):
for w_ in range(1 , w + 1 ):
if wt[i - 1] <= w_:
_lowercase : Optional[Any] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
else:
_lowercase : Union[str, Any] = dp[i - 1][w_]
return dp[n][w_], dp
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if not (isinstance(__UpperCAmelCase , (list, tuple) ) and isinstance(__UpperCAmelCase , (list, tuple) )):
raise ValueError(
"""Both the weights and values vectors must be either lists or tuples""" )
_lowercase : List[str] = len(__UpperCAmelCase )
if num_items != len(__UpperCAmelCase ):
_lowercase : Union[str, Any] = (
"""The number of weights must be the same as the number of values.\n"""
F"""But got {num_items} weights and {len(__UpperCAmelCase )} values"""
)
raise ValueError(__UpperCAmelCase )
for i in range(__UpperCAmelCase ):
if not isinstance(wt[i] , __UpperCAmelCase ):
_lowercase : List[str] = (
"""All weights must be integers but got weight of """
F"""type {type(wt[i] )} at index {i}"""
)
raise TypeError(__UpperCAmelCase )
_lowercase , _lowercase : Dict = knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
_lowercase : set = set()
_construct_solution(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return optimal_val, example_optional_set
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
# for the current item i at a maximum weight j to be part of an optimal subset,
# the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
# where i - 1 means considering only the previous items at the given maximum weight
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(__UpperCAmelCase , __UpperCAmelCase , i - 1 , __UpperCAmelCase , __UpperCAmelCase )
else:
optimal_set.add(__UpperCAmelCase )
_construct_solution(__UpperCAmelCase , __UpperCAmelCase , i - 1 , j - wt[i - 1] , __UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase: str = [3, 2, 4, 4]
UpperCAmelCase: Dict = [4, 3, 2, 3]
UpperCAmelCase: Optional[int] = 4
UpperCAmelCase: Optional[int] = 6
UpperCAmelCase: Union[str, Any] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
UpperCAmelCase , UpperCAmelCase: List[Any] = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
UpperCAmelCase , UpperCAmelCase: Union[str, Any] = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("""optimal_value = """, optimal_solution)
print("""An optimal subset corresponding to the optimal value""", optimal_subset)
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)


class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        # rescale to [-1, 1]
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters) if clusters is not None else None

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
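A small self-contained check of the quantization helpers above (illustrative values, not from the original file):

import numpy as np

clusters = np.array([[0, 0, 0], [255, 255, 255]], dtype=np.float32)  # two "palette" colors
pixels = np.array([[[10, 10, 10], [250, 240, 245]]], dtype=np.float32)  # a 1x2 RGB image
ids = color_quantize(pixels, clusters)
print(ids)  # [0 1]: the dark pixel maps to cluster 0, the bright pixel to cluster 1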
from typing import Dict, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends


if is_vision_available():
    import PIL

# soft dependency
if is_pytesseract_available():
    import pytesseract

logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any] , snake_case : Dict , snake_case : Tuple=7 , snake_case : Optional[Any]=3 , snake_case : List[str]=30 , snake_case : List[str]=400 , snake_case : List[str]=True , snake_case : Any=None , snake_case : List[Any]=True , snake_case : Any=[0.5, 0.5, 0.5] , snake_case : int=[0.5, 0.5, 0.5] , snake_case : Dict=True , snake_case : Optional[Any]=1 / 255 , snake_case : Optional[int]=True , ) -> Optional[Any]:
'''simple docstring'''
A = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333}
A = parent
A = batch_size
A = num_channels
A = min_resolution
A = max_resolution
A = do_resize
A = size
A = do_normalize
A = image_mean
A = image_std
A = do_rescale
A = rescale_factor
A = do_pad
def A_ ( self : str ) -> Any:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def A_ ( self : List[str] , snake_case : Tuple , snake_case : Optional[Any]=False ) -> Optional[Any]:
'''simple docstring'''
if not batched:
A = image_inputs[0]
if isinstance(snake_case , Image.Image ):
A , A = image.size
else:
A , A = image.shape[1], image.shape[2]
if w < h:
A = int(self.size['shortest_edge'] * h / w )
A = self.size['shortest_edge']
elif w > h:
A = self.size['shortest_edge']
A = int(self.size['shortest_edge'] * w / h )
else:
A = self.size['shortest_edge']
A = self.size['shortest_edge']
else:
A = []
for image in image_inputs:
A , A = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A = max(snake_case , key=lambda snake_case : item[0] )[0]
A = max(snake_case , key=lambda snake_case : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCAmelCase__ ( UpperCamelCase ,unittest.TestCase ):
lowerCAmelCase_ : List[Any] = DeformableDetrImageProcessor if is_vision_available() else None
def A_ ( self : Tuple ) -> str:
'''simple docstring'''
A = DeformableDetrImageProcessingTester(self )
@property
def A_ ( self : str ) -> int:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : str ) -> Optional[int]:
'''simple docstring'''
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case , 'image_mean' ) )
self.assertTrue(hasattr(snake_case , 'image_std' ) )
self.assertTrue(hasattr(snake_case , 'do_normalize' ) )
self.assertTrue(hasattr(snake_case , 'do_resize' ) )
self.assertTrue(hasattr(snake_case , 'do_rescale' ) )
self.assertTrue(hasattr(snake_case , 'do_pad' ) )
self.assertTrue(hasattr(snake_case , 'size' ) )
def A_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1_333} )
self.assertEqual(image_processor.do_pad , snake_case )
A = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=snake_case )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , snake_case )
def A_ ( self : List[Any] ) -> Any:
'''simple docstring'''
pass
def A_ ( self : Any ) -> Any:
'''simple docstring'''
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , Image.Image )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A , A = self.image_processor_tester.get_expected_values(snake_case , batched=snake_case )
A = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : str ) -> int:
'''simple docstring'''
A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , numpify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , np.ndarray )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A = image_processing(snake_case , return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(snake_case , batched=snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , torchify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , torch.Tensor )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A = image_processing(snake_case , return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(snake_case , batched=snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def A_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
A = json.loads(f.read() )
A = {'image_id': 39_769, 'annotations': target}
# encode them
A = DeformableDetrImageProcessor()
A = image_processing(images=snake_case , annotations=snake_case , return_tensors='pt' )
# verify pixel values
A = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape , snake_case )
A = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , snake_case , atol=1E-4 ) )
# verify area
A = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , snake_case ) )
# verify boxes
A = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , snake_case )
A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , snake_case , atol=1E-3 ) )
# verify image_id
A = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , snake_case ) )
# verify is_crowd
A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , snake_case ) )
# verify class_labels
A = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , snake_case ) )
# verify orig_size
A = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , snake_case ) )
# verify size
A = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , snake_case ) )
@slow
def A_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
A = json.loads(f.read() )
A = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target}
A = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
A = DeformableDetrImageProcessor(format='coco_panoptic' )
A = image_processing(images=snake_case , annotations=snake_case , masks_path=snake_case , return_tensors='pt' )
# verify pixel values
A = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape , snake_case )
A = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , snake_case , atol=1E-4 ) )
# verify area
A = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , snake_case ) )
# verify boxes
A = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , snake_case )
A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , snake_case , atol=1E-3 ) )
# verify image_id
A = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , snake_case ) )
# verify is_crowd
A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , snake_case ) )
# verify class_labels
A = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , snake_case ) )
# verify masks
A = 822_873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , snake_case )
# verify orig_size
A = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , snake_case ) )
# verify size
A = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , snake_case ) )
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( UpperCamelCase ,unittest.TestCase ):
lowerCAmelCase_ : Tuple = LayoutLMTokenizer
lowerCAmelCase_ : Any = LayoutLMTokenizerFast
lowerCAmelCase_ : Optional[int] = True
lowerCAmelCase_ : List[Any] = True
def A_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
A = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def A_ ( self : str , **snake_case : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **snake_case )
def A_ ( self : List[str] , snake_case : int ) -> List[Any]:
'''simple docstring'''
A = 'UNwant\u00E9d,running'
A = 'unwanted, running'
return input_text, output_text
def A_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
A = self.tokenizer_class(self.vocab_file )
A = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(snake_case , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , [7, 4, 5, 10, 8, 9] )
def A_ ( self : Any ) -> List[Any]:
'''simple docstring'''
pass
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase : Optional[int] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( __A ) -> Union[str, Any]:
if isinstance(__A , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__A , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__A ):
return [[videos]]
raise ValueError(F'Could not make batched video from {videos}' )
class __UpperCAmelCase ( _UpperCAmelCase ):
__lowercase = ["""pixel_values"""]
def __init__( self , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = PILImageResampling.BILINEAR , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = True , lowerCAmelCase_ = 1 / 2_55 , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_snake_case = size if size is not None else {'shortest_edge': 2_24}
_snake_case = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
_snake_case = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
_snake_case = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
_snake_case = do_resize
_snake_case = size
_snake_case = do_center_crop
_snake_case = crop_size
_snake_case = resample
_snake_case = do_rescale
_snake_case = rescale_factor
_snake_case = do_normalize
_snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = PILImageResampling.BILINEAR , lowerCAmelCase_ = None , **lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
if "shortest_edge" in size:
_snake_case = get_resize_output_image_size(lowerCAmelCase_ , size['shortest_edge'] , default_to_square=lowerCAmelCase_ )
elif "height" in size and "width" in size:
_snake_case = (size['height'], size['width'])
else:
raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(lowerCAmelCase_ , size=(size['height'], size['width']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ , ):
"""simple docstring"""
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ , ):
"""simple docstring"""
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = ChannelDimension.FIRST , ):
"""simple docstring"""
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_snake_case = to_numpy_array(lowerCAmelCase_ )
if do_resize:
_snake_case = self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ )
if do_center_crop:
_snake_case = self.center_crop(lowerCAmelCase_ , size=lowerCAmelCase_ )
if do_rescale:
_snake_case = self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ )
if do_normalize:
_snake_case = self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ )
_snake_case = to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ )
return image
    def preprocess( self , videos , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        if not valid_images(videos ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img , do_resize=do_resize , size=size , resample=resample , do_center_crop=do_center_crop , crop_size=crop_size , do_rescale=do_rescale , rescale_factor=rescale_factor , do_normalize=do_normalize , image_mean=image_mean , image_std=image_std , data_format=data_format , )
                for img in video
            ]
            for video in videos
        ]
        data = {'pixel_values': videos}
        return BatchFeature(data=data , tensor_type=return_tensors )
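# A minimal usage sketch (an addition, not part of the original file). It assumes the
# class above is exposed as `VideoMAEImageProcessor` from `transformers` -- the class
# name is not visible in this excerpt, so treat that name as an assumption.
if __name__ == "__main__":
    import numpy as np
    from transformers import VideoMAEImageProcessor

    processor = VideoMAEImageProcessor()
    # A fake 16-frame RGB video; each frame is an HxWxC uint8 array.
    video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(16)]
    inputs = processor(video, return_tensors="pt")
    print(inputs["pixel_values"].shape)  # expected: (1, 16, 3, 224, 224)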
| 495
|
"""simple docstring"""
import logging
import os
from .state import PartialState
class MultiProcessAdapter( logging.LoggerAdapter ):
    @staticmethod
    def _should_log( main_process_only ) -> bool:
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log( self , level , msg , *args , **kwargs ) -> None:
        if PartialState._shared_state == {}:
            raise RuntimeError(
                """You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" )
        main_process_only = kwargs.pop("""main_process_only""" , True )
        in_order = kwargs.pop("""in_order""" , False )
        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg , kwargs = self.process(msg , kwargs )
                self.logger.log(level , msg , *args , **kwargs )
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg , kwargs = self.process(msg , kwargs )
                        self.logger.log(level , msg , *args , **kwargs )
                    state.wait_for_everyone()
def get_logger( name: str , log_level: str = None ):
    '''simple docstring'''
    if log_level is None:
        log_level = os.environ.get("""ACCELERATE_LOG_LEVEL""" , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} )
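# A minimal usage sketch (an addition): `get_logger` above mirrors
# `accelerate.logging.get_logger`, so inside a script started with
# `accelerate launch` it logs on the main process only, unless told otherwise.
if __name__ == "__main__":
    from accelerate import Accelerator

    accelerator = Accelerator()  # initializes the shared PartialState
    logger = get_logger(__name__, log_level="INFO")
    logger.info("printed once, on the main process")
    logger.info("printed on every process", main_process_only=False)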
| 532
| 0
|
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
lowercase_ = """Usage of script: script_name <size_of_canvas:int>"""
lowercase_ = [0] * 100 + [1] * 10
random.shuffle(choice)
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> list[list[bool]]:
lowercase__ = [[False for i in range(_SCREAMING_SNAKE_CASE )] for j in range(_SCREAMING_SNAKE_CASE )]
return canvas
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> None:
for i, row in enumerate(_SCREAMING_SNAKE_CASE ):
for j, _ in enumerate(_SCREAMING_SNAKE_CASE ):
lowercase__ = bool(random.getrandbits(1 ) )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> list[list[bool]]:
lowercase__ = np.array(_SCREAMING_SNAKE_CASE )
lowercase__ = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(_SCREAMING_SNAKE_CASE ):
for c, pt in enumerate(_SCREAMING_SNAKE_CASE ):
lowercase__ = __judge_point(
_SCREAMING_SNAKE_CASE , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
lowercase__ = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
lowercase__ = current_canvas.tolist()
return return_canvas
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool:
lowercase__ = 0
lowercase__ = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
lowercase__ = pt
if pt:
if alive < 2:
lowercase__ = False
elif alive == 2 or alive == 3:
lowercase__ = True
elif alive > 3:
lowercase__ = False
else:
if alive == 3:
lowercase__ = True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
lowercase_ = int(sys.argv[1])
# main working structure of this module.
lowercase_ = create_canvas(canvas_size)
seed(c)
lowercase_ , lowercase_ = plt.subplots()
fig.show()
lowercase_ = ListedColormap(["""w""", """k"""])
try:
while True:
lowercase_ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
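# A headless sketch (an addition, not from the original file): advance a seeded
# 10x10 board a few generations without matplotlib, just to exercise the
# `seed`/`run` API above.
def _demo(generations: int = 5) -> None:
    board = create_canvas(10)
    seed(board)
    for _ in range(generations):
        board = run(board)
    print(sum(cell for row in board for cell in row), "cells alive")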
| 715
|
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    if isinstance(num, float):
        raise TypeError('int() can\'t convert non-string with explicit base' )
    if num < 0:
        raise ValueError('parameter must be positive int' )
    if isinstance(base, str):
        raise TypeError('\'str\' object cannot be interpreted as an integer' )
    if isinstance(base, float):
        raise TypeError('\'float\' object cannot be interpreted as an integer' )
    if base in (0, 1):
        raise ValueError('base must be >= 2' )
    if base > 36:
        raise ValueError('base must be <= 36' )
    new_value = ''
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
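# Worked examples (an addition, not from the original file): a few concrete
# conversions that make the contract of `decimal_to_any` above easy to eyeball.
if __name__ == "__main__":
    assert decimal_to_any(255, 16) == "FF"
    assert decimal_to_any(255, 2) == "11111111"
    assert decimal_to_any(9, 9) == "10"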
| 45
| 0
|
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    """simple docstring"""

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = 'nezha'

    def __init__( self , vocab_size=21128 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , max_relative_position=64 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , classifier_dropout=0.1 , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , use_cache=True , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
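# A minimal usage sketch (an addition): instantiating the config above with a
# couple of overrides, as with any `PretrainedConfig` subclass.
if __name__ == "__main__":
    config = NezhaConfig(hidden_size=512, num_hidden_layers=6)
    print(config.model_type, config.hidden_size)  # nezha 512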
| 587
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
    config.addinivalue_line(
        "markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
    config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
    config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
    config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
    config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    """simple docstring"""

    def check_output( self , want , got , optionflags ) -> bool:
        '''simple docstring'''
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 327
| 0
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
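# A short follow-up example (an addition): merging keeps the combined values sorted.
if __name__ == "__main__":
    print(merge_lists(SSL([2, 0]), SSL([1, 3])))  # 0 -> 1 -> 2 -> 3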
| 249
|
from manim import *
class Stage5(Scene):  # the original class name was mangled; "Stage5" is an assumption
    def construct(self) -> None:
_a : Optional[int] = Rectangle(height=0.5 , width=0.5 )
_a : Dict = Rectangle(height=0.25 , width=0.25 )
_a : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_a : Optional[int] = [mem.copy() for i in range(6 )]
_a : Tuple = [mem.copy() for i in range(6 )]
_a : Dict = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
_a : Any = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
_a : str = VGroup(__snake_case , __snake_case ).arrange(__snake_case , buff=0 )
_a : List[str] = Text('''CPU''' , font_size=24 )
_a : Union[str, Any] = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
_a : Union[str, Any] = [mem.copy() for i in range(4 )]
_a : Tuple = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
_a : Tuple = Text('''GPU''' , font_size=24 )
_a : List[Any] = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
_a : Optional[int] = [mem.copy() for i in range(6 )]
_a : Any = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
_a : Union[str, Any] = Text('''Model''' , font_size=24 )
_a : str = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
_a : Optional[Any] = []
_a : Optional[Any] = []
_a : Any = []
for i, rect in enumerate(__snake_case ):
rect.set_stroke(__snake_case )
_a : Optional[Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__snake_case , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__snake_case )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__snake_case , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__snake_case , buff=0.0 )
self.add(__snake_case )
model_cpu_arr.append(__snake_case )
self.add(*__snake_case , *__snake_case , *__snake_case )
_a : List[Any] = [mem.copy() for i in range(6 )]
_a : str = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
_a : Union[str, Any] = Text('''Loaded Checkpoint''' , font_size=24 )
_a : Any = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
checkpoint.move_to([3, 0.5, 0] )
self.add(__snake_case )
_a : Dict = []
_a : Tuple = []
for i, rect in enumerate(__snake_case ):
_a : str = fill.copy().set_fill(__snake_case , opacity=0.7 )
target.move_to(__snake_case )
ckpt_arr.append(__snake_case )
_a : Optional[int] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__snake_case )
self.add(*__snake_case , *__snake_case )
_a : int = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_a : Union[str, Any] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case , __snake_case )
_a : Any = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(__snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__snake_case )
_a : str = MarkupText(
f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
_a : Optional[Any] = [meta_mem.copy() for i in range(6 )]
_a : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
_a : Union[str, Any] = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
_a : Optional[int] = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
_a : Optional[int] = VGroup(__snake_case , __snake_case ).arrange(__snake_case , buff=0 )
_a : Dict = Text('''Disk''' , font_size=24 )
_a : List[str] = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__snake_case , run_time=3 ) , Write(__snake_case , run_time=1 ) , Create(__snake_case , run_time=1 ) )
_a : List[Any] = []
for i, rect in enumerate(__snake_case ):
_a : Dict = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__snake_case , run_time=1.5 ) )
self.play(*__snake_case )
self.play(FadeOut(__snake_case ) )
_a : Optional[int] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case , run_time=3 ) )
self.play(
FadeOut(__snake_case , __snake_case , *__snake_case , *__snake_case ) , )
self.wait()
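# Rendering note (an addition): a scene like the one above is rendered from the
# command line with manim. Both the module filename and the `Stage5` class name
# here are assumptions, since the original names were mangled in this copy:
#
#   manim -pql stage_5.py Stage5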
| 249
| 1
|
import qiskit
def single_qubit_measure( qubits: int , classical_bits: int ):
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(F'''Total count for various states are: {counts}''')
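# Sanity note (an addition): with X gates applied to both qubits, every shot
# collapses to |11>, so the simulator is expected to report {'11': 1000}.
if __name__ == "__main__":
    assert single_qubit_measure(2, 2) == {"11": 1000}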
| 171
|
def solution( min_total: int = 10**12 ) -> int:
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(F'''{solution() = }''')
| 171
| 1
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt( args ):
    parameter_file = os.path.join(args.tf_model_dir , """parameters.json""" )
    params = json.loads(open(parameter_file ).read() )
    if not params:
        raise ValueError(
            F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
    if not args.output.endswith(""".pt""" ):
        args.output = args.output + """.pt"""
    new_state = OrderedDict()
    with tf.device("""/CPU:0""" ):
        reader = tf.train.load_checkpoint(args.tf_model_dir )
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name ).astype(np.float16 )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
lowercase = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
lowercase = 8
lowercase = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase = torch.tensor(lowercase_ )
elif key_name.startswith("""model/moe""" ):
lowercase = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
lowercase = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/softmlp/kernel""" ):
lowercase = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
lowercase = key_name[-9:-7]
for i in range(16 ):
lowercase = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
lowercase = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowercase = torch.tensor(lowercase_ )
elif key_name.startswith("""model/mlp""" ):
lowercase = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
lowercase = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/p1/bias""" ):
lowercase = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
lowercase = vnp.copy() # same because it is one dimensional
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/p2/kernel""" ):
lowercase = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/p2/bias""" ):
lowercase = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
lowercase = vnp.copy() # same because it is one dimensional
lowercase = torch.tensor(lowercase_ )
elif key_name.startswith("""model/ln""" ):
lowercase = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
lowercase = """model.blocks.%d.feed_forward.norm.bias""" % player
lowercase = vnp.copy() # same because it is one dimensional
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/g""" ):
lowercase = """model.blocks.%d.feed_forward.norm.weight""" % player
lowercase = vnp.copy() # same because it is one dimensional
lowercase = torch.tensor(lowercase_ )
elif key_name.startswith("""model/att""" ):
lowercase = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
lowercase = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase = state[:, 0, :, :]
lowercase = state[:, 1, :, :]
lowercase = state[:, 2, :, :]
lowercase = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
lowercase = torch.tensor(lowercase_ )
lowercase = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
lowercase = torch.tensor(lowercase_ )
lowercase = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/o/kernel""" ):
lowercase = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
lowercase = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase = torch.tensor(lowercase_ )
elif key_name.startswith("""model/an""" ):
lowercase = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
lowercase = """model.blocks.%d.self_attn.norm.bias""" % player
lowercase = vnp.copy() # same because it is one dimensional
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/g""" ):
lowercase = """model.blocks.%d.self_attn.norm.weight""" % player
lowercase = vnp.copy() # same because it is one dimensional
lowercase = torch.tensor(lowercase_ )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
lowercase = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
lowercase = """model.%s.weight""" % nlayer
lowercase = vnp.copy() # same in embedded
lowercase = torch.tensor(lowercase_ )
if key_name.startswith("""model/wte""" ):
lowercase = """lm_head.weight"""
lowercase = vnp.copy() # same in embedded
lowercase = torch.tensor(lowercase_ )
elif key_name.startswith("""model/wob""" ):
lowercase = """final_logits_bias"""
lowercase = vnp.copy() # same in embedded
lowercase = state.reshape((1, -1) )
lowercase = torch.tensor(lowercase_ )
elif key_name == "model/dense/kernel":
lowercase = """model.last_project.weight"""
lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase = torch.tensor(lowercase_ )
elif key_name == "model/dense_1/bias":
lowercase = """model.last_project.bias"""
lowercase = vnp.copy() # same because it is one dimensional
lowercase = torch.tensor(lowercase_ )
    torch.save(new_state , args.output )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
    parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
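# Example invocation (an addition; the paths are hypothetical):
#
#   python convert_tf_gptsan_to_pt.py \
#       --tf_model_dir ./gptsan_tf_checkpoint \
#       --output ./gptsan_pytorch_model.pt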
| 653
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''spm_char.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
        '''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
        '''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''microsoft/speecht5_asr''': 1024,
    '''microsoft/speecht5_tts''': 1024,
    '''microsoft/speecht5_vc''': 1024,
}


class SpeechT5Tokenizer(PreTrainedTokenizer):

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs = None , **kwargs , ) -> None:
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )

    @property
    def vocab_size( self ) -> int:
        '''simple docstring'''
        return self.sp_model.get_piece_size()

    def get_vocab( self ) -> dict:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ) -> dict:
        '''simple docstring'''
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state

    def __setstate__( self , d ) -> None:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _tokenize( self , text ) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id( self , token ) -> int:
        '''simple docstring'''
        return self.sp_model.piece_to_id(token )

    def _convert_id_to_token( self , index ) -> str:
        '''simple docstring'''
        token = self.sp_model.IdToPiece(index )
        return token

    def convert_tokens_to_string( self , tokens ) -> str:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + suffix_ones
        return ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
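# A minimal usage sketch (an addition), assuming the class above is the
# `SpeechT5Tokenizer` shipped with transformers and that the hosted
# `microsoft/speecht5_tts` checkpoint is available:
if __name__ == "__main__":
    tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
    ids = tokenizer("Hello world!").input_ids
    print(ids, tokenizer.decode(ids))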
| 653
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mbart'''] = ['''MBartTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mbart_fast'''] = ['''MBartTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mbart'''] = [
'''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MBartForCausalLM''',
'''MBartForConditionalGeneration''',
'''MBartForQuestionAnswering''',
'''MBartForSequenceClassification''',
'''MBartModel''',
'''MBartPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mbart'''] = [
'''TFMBartForConditionalGeneration''',
'''TFMBartModel''',
'''TFMBartPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_mbart'''] = [
'''FlaxMBartForConditionalGeneration''',
'''FlaxMBartForQuestionAnswering''',
'''FlaxMBartForSequenceClassification''',
'''FlaxMBartModel''',
'''FlaxMBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
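# Usage note (an addition): with the `_LazyModule` indirection above, heavy
# submodules are imported only when an attribute is first touched, e.g.:
#
#   from transformers import MBartConfig                     # cheap, config only
#   from transformers import MBartForConditionalGeneration   # pulls in the torch modeling code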
| 84
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
def __lowercase ( self : Dict ):
torch.manual_seed(0 )
_a : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=9 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,attention_head_dim=(2, 4) ,use_linear_projection=_UpperCAmelCase ,)
_a : Optional[int] = PNDMScheduler(skip_prk_steps=_UpperCAmelCase )
torch.manual_seed(0 )
_a : Dict = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,sample_size=128 ,)
torch.manual_seed(0 )
_a : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act='gelu' ,projection_dim=512 ,)
_a : str = CLIPTextModel(_UpperCAmelCase )
_a : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_a : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
    def get_dummy_inputs( self ,device ,seed=0 ):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32) ,rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 ,2 ,3 ,1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        mask_image = Image.fromarray(np.uint8(image + 4 ) ).convert('RGB' ).resize((64, 64) )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': init_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
def __lowercase ( self : List[str] ):
_a : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_a : Dict = self.get_dummy_components()
_a : int = StableDiffusionInpaintPipeline(**_UpperCAmelCase )
_a : Any = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_a : Dict = self.get_dummy_inputs(_UpperCAmelCase )
_a : Tuple = sd_pipe(**_UpperCAmelCase ).images
_a : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a : List[Any] = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : List[Any] ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : int ):
_a : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_a : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_a : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
_a : List[Any] = 'stabilityai/stable-diffusion-2-inpainting'
_a : Tuple = StableDiffusionInpaintPipeline.from_pretrained(_UpperCAmelCase ,safety_checker=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
_a : int = 'Face of a yellow cat, high resolution, sitting on a park bench'
_a : Dict = torch.manual_seed(0 )
_a : Any = pipe(
prompt=_UpperCAmelCase ,image=_UpperCAmelCase ,mask_image=_UpperCAmelCase ,generator=_UpperCAmelCase ,output_type='np' ,)
_a : List[str] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def __lowercase ( self : Tuple ):
_a : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_a : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_a : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
_a : int = 'stabilityai/stable-diffusion-2-inpainting'
        _a : List[str] = StableDiffusionInpaintPipeline.from_pretrained(
            _UpperCAmelCase ,torch_dtype=torch.float16 ,safety_checker=None ,)
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
_a : Dict = 'Face of a yellow cat, high resolution, sitting on a park bench'
_a : int = torch.manual_seed(0 )
_a : Dict = pipe(
prompt=_UpperCAmelCase ,image=_UpperCAmelCase ,mask_image=_UpperCAmelCase ,generator=_UpperCAmelCase ,output_type='np' ,)
_a : str = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def __lowercase ( self : Tuple ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_a : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_a : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_a : List[str] = 'stabilityai/stable-diffusion-2-inpainting'
_a : Any = PNDMScheduler.from_pretrained(_UpperCAmelCase ,subfolder='scheduler' )
        _a : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
            _UpperCAmelCase ,safety_checker=None ,scheduler=_UpperCAmelCase ,torch_dtype=torch.float16 ,)
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_a : Any = 'Face of a yellow cat, high resolution, sitting on a park bench'
_a : Optional[Any] = torch.manual_seed(0 )
_a : int = pipe(
prompt=_UpperCAmelCase ,image=_UpperCAmelCase ,mask_image=_UpperCAmelCase ,generator=_UpperCAmelCase ,num_inference_steps=2 ,output_type='np' ,)
_a : int = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 358
| 0
|
'''simple docstring'''
import random
def random_graph( vertices_number: int , probability: float , directed: bool = False ) -> dict:
    """simple docstring"""
    graph: dict = {i: [] for i in range(vertices_number )}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number )
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is greater than probability probability
    for i in range(vertices_number ):
        for j in range(i + 1 , vertices_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, add an edge in from j to i, either
                    graph[j].append(i )
    return graph


def complete_graph( vertices_number: int ) -> dict:
    """simple docstring"""
    return {
        i: [j for j in range(vertices_number ) if i != j] for i in range(vertices_number )
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
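# A small usage sketch (an addition): sample an undirected graph and check that
# every edge is mirrored in both adjacency lists.
if __name__ == "__main__":
    g = random_graph(5, 0.5)
    assert all(i in g[j] for i in g for j in g[i])
    print(g)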
| 700
|
'''simple docstring'''
from math import sqrt
def solution( limit: int = 1000000 ) -> int:
    """simple docstring"""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''')
| 11
| 0
|
import math
def fx( x: float , a: float ) -> float:
    '''simple docstring'''
    return math.pow(x , 2 ) - a


def fx_derivative( x: float ) -> float:
    '''simple docstring'''
    return 2 * x


def get_initial_point( a: float ) -> float:
    '''simple docstring'''
    start = 2.0
    while start <= a:
        start = math.pow(start , 2 )
    return start


def square_root_iterative( a: float , max_iter: int = 9_999 , tolerance: float = 0.0_0_0_0_0_0_0_0_0_0_0_0_0_1 ) -> float:
    '''simple docstring'''
    if a < 0:
        raise ValueError('''math domain error''' )
    value = get_initial_point(a )
    for _ in range(max_iter ):
        prev_value = value
        value = value - fx(value , a ) / fx_derivative(value )
        if abs(prev_value - value ) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
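# Worked examples (an addition): the Newton iteration above converges to the
# usual square roots within the default tolerance.
if __name__ == "__main__":
    print(square_root_iterative(4.0))   # ~2.0
    print(square_root_iterative(2.0))   # ~1.41421356...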
| 97
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
@require_torch
def _UpperCAmelCase ( self : Tuple ):
_a = pipeline(
task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' )
_a = text_classifier('This is great !' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'label': 'LABEL_0', 'score': 0.504}] )
_a = text_classifier('This is great !' , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}] )
_a = text_classifier(['This is great !', 'This is bad'] , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , [
[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
] , )
_a = text_classifier('This is great !' , top_k=1 )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'label': 'LABEL_0', 'score': 0.504}] )
# Legacy behavior
_a = text_classifier('This is great !' , return_all_scores=SCREAMING_SNAKE_CASE_ )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'label': 'LABEL_0', 'score': 0.504}] )
_a = text_classifier('This is great !' , return_all_scores=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , [[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}]] )
_a = text_classifier(['This is great !', 'Something else'] , return_all_scores=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , [
[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
] , )
_a = text_classifier(['This is great !', 'Something else'] , return_all_scores=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , [
{'label': 'LABEL_0', 'score': 0.504},
{'label': 'LABEL_0', 'score': 0.504},
] , )
@require_torch
def _UpperCAmelCase ( self : Tuple ):
import torch
_a = pipeline(
task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' , device=torch.device('cpu' ) , )
_a = text_classifier('This is great !' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'label': 'LABEL_0', 'score': 0.504}] )
@require_tf
def _UpperCAmelCase ( self : Optional[int] ):
_a = pipeline(
task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='tf' )
_a = text_classifier('This is great !' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'label': 'LABEL_0', 'score': 0.504}] )
@slow
@require_torch
def _UpperCAmelCase ( self : Dict ):
_a = pipeline('text-classification' )
_a = text_classifier('This is great !' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'label': 'POSITIVE', 'score': 1.0}] )
_a = text_classifier('This is bad !' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'label': 'NEGATIVE', 'score': 1.0}] )
_a = text_classifier('Birds are a type of animal' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'label': 'POSITIVE', 'score': 0.988}] )
@slow
@require_tf
def _UpperCAmelCase ( self : str ):
_a = pipeline('text-classification' , framework='tf' )
_a = text_classifier('This is great !' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'label': 'POSITIVE', 'score': 1.0}] )
_a = text_classifier('This is bad !' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'label': 'NEGATIVE', 'score': 1.0}] )
_a = text_classifier('Birds are a type of animal' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'label': 'POSITIVE', 'score': 0.988}] )
def _UpperCAmelCase ( self : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str ):
_a = TextClassificationPipeline(model=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ )
return text_classifier, ["HuggingFace is in", "This is another test"]
def _UpperCAmelCase ( self : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict ):
_a = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
_a = 'HuggingFace is in'
_a = text_classifier(SCREAMING_SNAKE_CASE_ )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'label': ANY(SCREAMING_SNAKE_CASE_ ), 'score': ANY(SCREAMING_SNAKE_CASE_ )}] )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
_a = ['HuggingFace is in ', 'Paris is in France']
_a = text_classifier(SCREAMING_SNAKE_CASE_ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'label': ANY(SCREAMING_SNAKE_CASE_ ), 'score': ANY(SCREAMING_SNAKE_CASE_ )}, {'label': ANY(SCREAMING_SNAKE_CASE_ ), 'score': ANY(SCREAMING_SNAKE_CASE_ )}] , )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
self.assertTrue(outputs[1]['label'] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
_a = text_classifier(SCREAMING_SNAKE_CASE_ , top_k=SCREAMING_SNAKE_CASE_ )
_a = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , [[{'label': ANY(SCREAMING_SNAKE_CASE_ ), 'score': ANY(SCREAMING_SNAKE_CASE_ )}] * N, [{'label': ANY(SCREAMING_SNAKE_CASE_ ), 'score': ANY(SCREAMING_SNAKE_CASE_ )}] * N] , )
_a = {'text': 'HuggingFace is in ', 'text_pair': 'Paris is in France'}
_a = text_classifier(SCREAMING_SNAKE_CASE_ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , {'label': ANY(SCREAMING_SNAKE_CASE_ ), 'score': ANY(SCREAMING_SNAKE_CASE_ )} , )
self.assertTrue(outputs['label'] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
_a = [['HuggingFace is in ', 'Paris is in France']]
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
text_classifier(SCREAMING_SNAKE_CASE_ )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
_a = text_classifier([[['HuggingFace is in ', 'Paris is in France']]] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'label': ANY(SCREAMING_SNAKE_CASE_ ), 'score': ANY(SCREAMING_SNAKE_CASE_ )}] , )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
| 562
| 0
|
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(R'\b(a|an|the)\b', re.UNICODE)
OPTS = None
def _lowerCamelCase ( ) -> List[str]:
_a = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
parser.add_argument(
"--na-prob-thresh" , "-t" , type=lowercase , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=lowercase , help="Save precision-recall curves to directory." )
parser.add_argument("--verbose" , "-v" , action="store_true" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def _lowerCamelCase ( lowercase : Any ) -> int:
_a = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
_a = bool(qa["answers"]["text"] )
return qid_to_has_ans
def _lowerCamelCase ( lowercase : Tuple ) -> List[str]:
def remove_articles(lowercase : Optional[int] ):
return ARTICLES_REGEX.sub(" " , lowercase )
def white_space_fix(lowercase : List[str] ):
return " ".join(text.split() )
def remove_punc(lowercase : List[Any] ):
_a = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowercase : List[Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowercase ) ) ) )
def _lowerCamelCase ( lowercase : Any ) -> str:
if not s:
return []
return normalize_answer(lowercase ).split()
def _lowerCamelCase ( lowercase : List[str] , lowercase : Optional[int] ) -> Tuple:
return int(normalize_answer(lowercase ) == normalize_answer(lowercase ) )
def _lowerCamelCase ( lowercase : List[Any] , lowercase : Optional[Any] ) -> Dict:
_a = get_tokens(lowercase )
_a = get_tokens(lowercase )
_a = collections.Counter(lowercase ) & collections.Counter(lowercase )
_a = sum(common.values() )
if len(lowercase ) == 0 or len(lowercase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
_a = 1.0 * num_same / len(lowercase )
_a = 1.0 * num_same / len(lowercase )
_a = (2 * precision * recall) / (precision + recall)
return fa
def _lowerCamelCase ( lowercase : Any , lowercase : int ) -> Optional[Any]:
_a = {}
_a = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
_a = qa['''id''']
_a = [t for t in qa['''answers''']['''text'''] if normalize_answer(lowercase )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
_a = ['''''']
if qid not in preds:
print(F'Missing prediction for {qid}' )
continue
_a = preds[qid]
# Take max over all gold answers
_a = max(compute_exact(lowercase , lowercase ) for a in gold_answers )
_a = max(compute_fa(lowercase , lowercase ) for a in gold_answers )
return exact_scores, fa_scores
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : List[Any] , lowercase : Tuple , lowercase : Any ) -> List[Any]:
_a = {}
for qid, s in scores.items():
_a = na_probs[qid] > na_prob_thresh
if pred_na:
_a = float(not qid_to_has_ans[qid] )
else:
_a = s
return new_scores
def _lowerCamelCase ( lowercase : str , lowercase : Optional[Any] , lowercase : Tuple=None ) -> Union[str, Any]:
if not qid_list:
_a = len(lowercase )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values() ) / total),
("f1", 100.0 * sum(fa_scores.values() ) / total),
("total", total),
] )
else:
_a = len(lowercase )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("total", total),
] )
def _lowerCamelCase ( lowercase : List[str] , lowercase : Optional[int] , lowercase : int ) -> Optional[Any]:
for k in new_eval:
_a = new_eval[k]
def _lowerCamelCase ( lowercase : List[Any] , lowercase : List[str] , lowercase : Optional[int] , lowercase : Dict ) -> Union[str, Any]:
plt.step(lowercase , lowercase , color="b" , alpha=0.2 , where="post" )
plt.fill_between(lowercase , lowercase , step="post" , alpha=0.2 , color="b" )
plt.xlabel("Recall" )
plt.ylabel("Precision" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(lowercase )
plt.savefig(lowercase )
plt.clf()
def _lowerCamelCase ( lowercase : List[str] , lowercase : Tuple , lowercase : Optional[int] , lowercase : int , lowercase : Optional[int]=None , lowercase : Optional[Any]=None ) -> Optional[Any]:
_a = sorted(lowercase , key=lambda lowercase : na_probs[k] )
_a = 0.0
_a = 1.0
_a = 0.0
_a = [1.0]
_a = [0.0]
_a = 0.0
for i, qid in enumerate(lowercase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
_a = true_pos / float(i + 1 )
_a = true_pos / float(lowercase )
if i == len(lowercase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(lowercase )
recalls.append(lowercase )
if out_image:
plot_pr_curve(lowercase , lowercase , lowercase , lowercase )
return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score")
    pr_fa = make_precision_recall_eval(
        fa_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score")
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)")
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_fa, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
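# Example invocation (added; hypothetical file names — the real flags are whatever
# parse_args earlier in this script defines):
#   python evaluate_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --out-image-dir pr_plots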
"""Minimum cut of a flow network, via Ford-Fulkerson with BFS (Edmonds-Karp)."""

test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    # Return True if there is an augmenting path from source s to sink t in the
    # residual graph; record the path in `parent`.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph, source, sink):
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    # Edges that were saturated by the maximum flow form the cut.
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
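# Note (added): `mincut` mutates `graph` into its final residual capacities; the
# `temp` copy inside the function is what lets saturated edges be recovered. Pass
# a copy, e.g. [row[:] for row in test_graph], if the original matrix is needed later.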
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ-Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(self, vqvae, text_encoder, tokenizer, transformer, scheduler, learned_classifier_free_sampling_embeddings):
        super().__init__()
        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt"
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt"
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncate `log_p_x_0` so that, per latent pixel, only the most probable classes whose
        cumulative probability stays below `truncation_rate` are kept; the rest are set to log(0).
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        # undo the sort to line the mask up with the original class ordering
        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
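# Minimal usage sketch (added; the checkpoint name and truncation value below are
# illustrative, not taken from this file):
#
#   from diffusers import VQDiffusionPipeline
#
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool", truncation_rate=0.86).images[0]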
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
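# Note (added): with _LazyModule, importing this package stays cheap; the torch or flax
# submodules are imported only when one of their attributes is first accessed, e.g.
# `from transformers.models.gpt_neo import GPTNeoModel` triggers the torch-side import.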
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
"""simple docstring"""
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
"""simple docstring"""
        generator = torch.manual_seed(seed)
        inputs = {
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
[
0.3_6_9_6_8_3_9_2,
0.2_7_0_2_5_3_7_2,
0.3_2_4_4_6_7_6_6,
0.2_8_3_7_9_3_8_7,
0.3_6_3_6_3_2_7_4,
0.3_0_7_3_3_3_4_7,
0.2_7_1_0_0_0_2_7,
0.2_7_0_5_4_1_2_5,
0.2_5_5_3_6_0_9_6,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
[
0.1_8_6_8_1_8_6_9,
0.3_3_9_0_7_8_1_6,
0.5_3_6_1_2_7_6,
0.1_4_4_3_2_8_6_5,
-0.0_2_8_5_6_6_1_1,
-0.7_3_9_4_1_1_2_3,
0.2_3_3_9_7_9_8_7,
0.4_7_3_2_2_6_8_2,
-0.3_7_8_2_3_1_6_4,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
[
0.1_8_5_3_9_6_4_5,
0.3_3_9_8_7_2_4_8,
0.5_3_7_8_5_5_9,
0.1_4_4_3_7_1_4_2,
-0.0_2_4_5_5_2_6_1,
-0.7_3_3_8_3_1_7,
0.2_3_9_9_0_7_5_5,
0.4_7_3_5_6_2_7_2,
-0.3_7_8_6_5_0_5,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
assert callback_fn.has_been_called
assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
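# To run just the fast tests above (added note; the path is hypothetical and depends on
# the checkout layout, and the @slow class additionally needs a CUDA GPU with RUN_SLOW=1):
#   pytest tests/pipelines/stable_diffusion/test_stable_diffusion_panorama.py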
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab = (
"<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
"ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
"ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
"oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
"pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
"yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
"əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
"ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
"ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
"uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
"ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
"ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
"ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
).split(" " )
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def test_tokenizer_add_new_tokens(self):
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
# check adding a single token
tokenizer.add_tokens("xxx" )
UpperCAmelCase__ = tokenizer("m xxx ɪ" , do_phonemize=_lowercase ).input_ids
self.assertEqual(_lowercase , [13, 3_92, 17] ) # xxx should be last token
tokenizer.add_tokens(["aaa", "bbb", "ccc"] )
UpperCAmelCase__ = tokenizer("m aaa ɪ ccc" , do_phonemize=_lowercase ).input_ids
self.assertEqual(_lowercase , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa
UpperCAmelCase__ = tokenizer("maɪ c" , do_phonemize=_lowercase ).input_ids
self.assertEqual(_lowercase , [3, 2_00] ) # mai should be <unk> (=3)
    def test_phonemize(self):
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
UpperCAmelCase__ = "Hello how are you"
UpperCAmelCase__ = tokenizer.phonemize(_lowercase , phonemizer_lang="en-us" )
self.assertEqual(_lowercase , "h ə l oʊ h aʊ ɑːɹ j uː" )
    def test_encode(self):
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
UpperCAmelCase__ = "Hello how are you"
UpperCAmelCase__ = tokenizer.phonemize(_lowercase , phonemizer_lang="en-us" )
self.assertEqual(tokenizer(_lowercase ).input_ids , tokenizer(_lowercase , do_phonemize=_lowercase ).input_ids )
    def test_encode_decode(self):
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
UpperCAmelCase__ = "Hello how are you"
UpperCAmelCase__ = tokenizer.phonemize(_lowercase , phonemizer_lang="en-us" )
UpperCAmelCase__ = tokenizer.decode(tokenizer(_lowercase ).input_ids )
self.assertEqual(_lowercase , _lowercase )
    def test_decode(self):
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
UpperCAmelCase__ = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
UpperCAmelCase__ = tokenizer.decode(sample_ids[0] )
UpperCAmelCase__ = tokenizer.batch_decode(_lowercase )
self.assertEqual(_lowercase , batch_tokens[0] )
self.assertEqual(_lowercase , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
    def test_phonemize_with_word_del(self):
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
UpperCAmelCase__ = "Hello how are you"
UpperCAmelCase__ = tokenizer.phonemize(_lowercase , phonemizer_lang="en-us" )
self.assertEqual(_lowercase , "h ə l oʊ | h aʊ | ɑːɹ | j uː |" )
    def test_encode_with_del(self):
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
UpperCAmelCase__ = "Hello how are you"
UpperCAmelCase__ = tokenizer.phonemize(_lowercase , phonemizer_lang="en-us" )
self.assertEqual(tokenizer(_lowercase ).input_ids , tokenizer(_lowercase , do_phonemize=_lowercase ).input_ids )
    def test_decode_with_del(self):
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
# fmt: off
UpperCAmelCase__ = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
UpperCAmelCase__ = tokenizer.decode(sample_ids[0] )
UpperCAmelCase__ = tokenizer.batch_decode(_lowercase )
self.assertEqual(_lowercase , batch_tokens[0] )
self.assertEqual(_lowercase , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
# decode with no word_del_token filter
UpperCAmelCase__ = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=_lowercase )
UpperCAmelCase__ = tokenizer.batch_decode(_lowercase , filter_word_delimiter_token=_lowercase )
self.assertEqual(_lowercase , batch_tokens[0] )
self.assertEqual(_lowercase , ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"] )
    def test_encode_decode_with_del(self):
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
UpperCAmelCase__ = "Hello how are you"
UpperCAmelCase__ = tokenizer.phonemize(_lowercase , phonemizer_lang="en-us" )
UpperCAmelCase__ = tokenizer.decode(tokenizer(_lowercase ).input_ids , filter_word_delimiter_token=_lowercase )
self.assertEqual(_lowercase , _lowercase )
    def test_encode_decode_with_del_filter(self):
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
UpperCAmelCase__ = "Hello how are you"
UpperCAmelCase__ = tokenizer.phonemize(_lowercase , phonemizer_lang="en-us" )
UpperCAmelCase__ = tokenizer.decode(tokenizer(_lowercase ).input_ids , filter_word_delimiter_token=_lowercase )
self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |" )] ).strip() , _lowercase )
    def test_change_phonemizer_lang(self):
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token=_lowercase )
UpperCAmelCase__ = "Hello how are you"
UpperCAmelCase__ = tokenizer(_lowercase , phonemizer_lang="en-us" ).input_ids
UpperCAmelCase__ = tokenizer(_lowercase , phonemizer_lang="fr-fr" ).input_ids
self.assertNotEqual(_lowercase , _lowercase )
UpperCAmelCase__ = tokenizer.decode(_lowercase )
UpperCAmelCase__ = tokenizer.decode(_lowercase )
self.assertEqual(_lowercase , "h ə l oʊ h aʊ ɑːɹ j uː" )
self.assertEqual(_lowercase , "ɛ l o h aʊ a ʁ j u" )
    def test_case_insensitive(self):
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
UpperCAmelCase__ = "Hello how Are you"
UpperCAmelCase__ = "hello how are you"
UpperCAmelCase__ = tokenizer(_lowercase ).input_ids
UpperCAmelCase__ = tokenizer(_lowercase ).input_ids
self.assertEqual(_lowercase , _lowercase )
    def test_tokenizer_decode_added_tokens(self):
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
tokenizer.add_tokens(["!", "?"] )
tokenizer.add_special_tokens({"cls_token": "$$$"} )
# fmt: off
UpperCAmelCase__ = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
UpperCAmelCase__ = tokenizer.batch_decode(_lowercase )
self.assertEqual(_lowercase , ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"] )
@staticmethod
    def get_from_offsets(offsets, key):
"""simple docstring"""
UpperCAmelCase__ = [d[key] for d in offsets]
return retrieved_list
    def test_offsets(self):
"""simple docstring"""
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
tokenizer.add_tokens("|" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("text" in outputs )
self.assertTrue("char_offsets" in outputs )
self.assertTrue(isinstance(_lowercase , _lowercase ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] , "char" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "char" ) , ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "start_offset" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "end_offset" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
    def test_offsets_batch(self):
"""simple docstring"""
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))
# transform list to ModelOutput
            outputs_batch_a = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )
self.assertListEqual(outputs_batch["text"] , outputs_batch_a["text"] )
        def recursive_check(list_or_dict_a, list_or_dict_b):
            if isinstance(list_or_dict_a, list):
                [recursive_check(la, lb) for la, lb in zip(list_or_dict_a, list_or_dict_b)]
            self.assertEqual(list_or_dict_a, list_or_dict_b)
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["char_offsets"] , outputs_batch_a["char_offsets"] )
# fmt: off
        sample_ids = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)
@unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" )
    def test_added_tokens_do_lower_case(self):
"""simple docstring"""
pass
@unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" )
    def test_encode_decode_with_spaces(self):
"""simple docstring"""
pass
@unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" )
    def test_internal_consistency(self):
"""simple docstring"""
pass
@unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" )
    def test_pretrained_model_lists(self):
"""simple docstring"""
pass
    def test_add_tokens_tokenizer(self):
"""simple docstring"""
UpperCAmelCase__ = self.get_tokenizers(do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase__ = tokenizer.vocab_size
UpperCAmelCase__ = len(_lowercase )
self.assertNotEqual(_lowercase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
UpperCAmelCase__ = ["aaaaa bbbbbb", "cccccccccdddddddd"]
UpperCAmelCase__ = tokenizer.add_tokens(_lowercase )
UpperCAmelCase__ = tokenizer.vocab_size
UpperCAmelCase__ = len(_lowercase )
self.assertNotEqual(_lowercase , 0 )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , len(_lowercase ) )
self.assertEqual(_lowercase , all_size + len(_lowercase ) )
UpperCAmelCase__ = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=_lowercase )
self.assertGreaterEqual(len(_lowercase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
UpperCAmelCase__ = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
UpperCAmelCase__ = tokenizer.add_special_tokens(_lowercase )
UpperCAmelCase__ = tokenizer.vocab_size
UpperCAmelCase__ = len(_lowercase )
self.assertNotEqual(_lowercase , 0 )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , len(_lowercase ) )
self.assertEqual(_lowercase , all_size_a + len(_lowercase ) )
UpperCAmelCase__ = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=_lowercase )
self.assertGreaterEqual(len(_lowercase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
    def test_tf_encode_plus_sent_to_model(self):
"""simple docstring"""
pass
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
    def test_torch_encode_plus_sent_to_model(self):
"""simple docstring"""
pass
    def test_convert_tokens_to_string_format(self):
"""simple docstring"""
UpperCAmelCase__ = self.get_tokenizers(fast=_lowercase , do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase__ = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
UpperCAmelCase__ = tokenizer.convert_tokens_to_string(_lowercase )
self.assertIsInstance(output["text"] , _lowercase )
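# Note (added): the tests above are gated by @require_phonemizer; running them needs the
# `phonemizer` package and its espeak backend installed (e.g. `pip install phonemizer`).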
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
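# Minimal usage sketch (added; assumes an installed transformers package, not part of
# this file):
#
#   from transformers import MarkupLMConfig, MarkupLMModel
#
#   config = MarkupLMConfig()      # markuplm-base style defaults as defined above
#   model = MarkupLMModel(config)  # randomly initialised weights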
'''simple docstring'''
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """
    Return the probability of exactly `successes` successes in `trials`
    independent Bernoulli trials with success probability `prob`.
    """
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
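# Worked check (added): C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375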
'''simple docstring'''
import argparse
import collections

import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Layer names are different for v1.0 and v1.1."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Layer names are different for v1.0 and v1.1."""
    if split_mlp_wi:
        wi_a = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_b = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_a, wi_b)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Replaces the model's params with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
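# Example invocation (added; hypothetical paths):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output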
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for the Shap-E pipelines: a batch of rendered frames."""

    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]
class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(self, prior, image_encoder, image_processor, scheduler, renderer):
        super().__init__()
        self.register_modules(
            prior=prior, image_encoder=image_encoder, image_processor=image_processor, scheduler=scheduler, renderer=renderer
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",  # pil, np, latent
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
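

# Illustrative sketch (not part of the original pipeline): the classifier-free
# guidance update used in the denoising loop above, isolated as a standalone
# helper for clarity. The argument names are hypothetical.
def _cfg_combine_example(noise_pred_uncond, noise_pred_cond, guidance_scale):
    # guidance_scale == 1.0 reproduces the conditional prediction; larger values
    # push the result further away from the unconditional prediction.
    return noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)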
"""simple docstring"""
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
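

# Minimal usage sketch (illustrative; the 2x2 key has determinant 7, which is
# coprime with 36, so it is a valid encryption key):
#   hill = HillCipher(numpy.array([[2, 5], [1, 6]]))
#   ct = hill.encrypt("testing hill cipher")
#   hill.decrypt(ct)  # -> 'TESTINGHILLCIPHERR' (the input is padded with its
#                     #    last character to a multiple of the key order)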
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
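

# Illustrative checks (hypothetical inputs, not from the original file):
#   bisection(lambda x: x**2 - 4, 0, 1000)  # -> ~2.0
# f above has its real root near 2.0945515, so bisection(f, 1, 1000)
# converges to that value within the 1e-7 stopping tolerance.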
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args):
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree):
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx, dims):
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
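

# Worked example (illustrative): with dims == (2, 3), flat index 5 maps to the
# multi-index (1, 2), since 5 == 1 * 3 + 2:
#   _flat_idx_to_idx(5, (2, 3))  # -> (1, 2)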
@torch.jit.ignore
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ = None,snake_case_ = None,):
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(snake_case_ ) -> None:
_A : Union[str, Any] = True
for i in range(len(snake_case_ ) ):
_A : Union[str, Any] = -1 * (i + 1)
l[reversed_idx] &= tally
_A : Any = l[reversed_idx]
if start_edges is None:
_A : Any = [s == 0 for s in start]
reduce_edge_list(snake_case_ )
if end_edges is None:
_A : str = [e == (d - 1) for e, d in zip(snake_case_,snake_case_ )]
reduce_edge_list(snake_case_ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(snake_case_ ) == 0:
return [()]
elif len(snake_case_ ) == 1:
return [(slice(start[0],end[0] + 1 ),)]
_A : List[Tuple[slice, ...]] = []
_A : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(snake_case_,snake_case_ ):
if s == e:
path_list.append(slice(snake_case_,s + 1 ) )
else:
break
_A : Tuple[slice, ...] = tuple(snake_case_ )
_A : Optional[Any] = len(snake_case_ )
# start == end, and we're done
if divergence_idx == len(snake_case_ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
_A : Dict = start[divergence_idx]
return tuple(
path + (slice(snake_case_,sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :],[d - 1 for d in dims[divergence_idx + 1 :]],dims[divergence_idx + 1 :],start_edges=start_edges[divergence_idx + 1 :],end_edges=[True for _ in end_edges[divergence_idx + 1 :]],) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
_A : List[Any] = end[divergence_idx]
return tuple(
path + (slice(snake_case_,edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]],end[divergence_idx + 1 :],dims[divergence_idx + 1 :],start_edges=[True for _ in start_edges[divergence_idx + 1 :]],end_edges=end_edges[divergence_idx + 1 :],) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx],end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx],end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1,end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
_A : Dict = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1,end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : List[Any] = t.shape[:no_batch_dims]
_A : Union[str, Any] = list(_flat_idx_to_idx(snake_case_,snake_case_ ) )
# _get_minimal_slice_set is inclusive
_A : Optional[Any] = list(_flat_idx_to_idx(flat_end - 1,snake_case_ ) )
# Get an ordered list of slices to perform
_A : Optional[int] = _get_minimal_slice_set(
snake_case_,snake_case_,snake_case_,)
_A : Union[str, Any] = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ = False,snake_case_ = None,snake_case_ = False,):
if not (len(snake_case_ ) > 0):
raise ValueError("""Must provide at least one input""" )
_A : List[str] = [shape[:no_batch_dims] for shape in _fetch_dims(snake_case_ )]
_A : Optional[int] = tuple([max(snake_case_ ) for s in zip(*snake_case_ )] )
def _prep_inputs(snake_case_ ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
_A : Optional[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
_A : Optional[Any] = t.reshape(-1,*t.shape[no_batch_dims:] )
else:
_A : Optional[int] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
_A : Dict[str, Any] = tensor_tree_map(_prep_inputs,snake_case_ )
_A : List[Any] = None
if _out is not None:
_A : Optional[Any] = tensor_tree_map(lambda snake_case_ : t.view([-1] + list(t.shape[no_batch_dims:] ) ),_out )
_A : int = 1
for d in orig_batch_dims:
flat_batch_dim *= d
_A : Optional[Any] = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(snake_case_ ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
_A : int = 0
_A : Tuple = prepped_outputs
for _ in range(snake_case_ ):
# Chunk the input
if not low_mem:
_A : Union[str, Any] = _select_chunk
else:
_A : Optional[Any] = partial(
_chunk_slice,flat_start=snake_case_,flat_end=min(snake_case_,i + chunk_size ),no_batch_dims=len(snake_case_ ),)
_A : Dict[str, Any] = tensor_tree_map(snake_case_,snake_case_ )
# Run the layer on the chunk
_A : Any = layer(**snake_case_ )
# Allocate space for the output
if out is None:
_A : Any = tensor_tree_map(lambda snake_case_ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ),snake_case_ )
# Put the chunk in its pre-allocated space
if isinstance(snake_case_,snake_case_ ):
def assign(snake_case_,snake_case_ ) -> None:
for k, v in da.items():
if isinstance(snake_case_,snake_case_ ):
assign(snake_case_,da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
_A : Dict = da[k]
assign(snake_case_,snake_case_ )
elif isinstance(snake_case_,snake_case_ ):
for xa, xa in zip(snake_case_,snake_case_ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
_A : Tuple = xa
elif isinstance(snake_case_,torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
_A : Tuple = output_chunk
else:
raise ValueError("""Not supported""" )
i += chunk_size
_A : Optional[Any] = tensor_tree_map(lambda snake_case_ : t.view(orig_batch_dims + t.shape[1:] ),snake_case_ )
return out
class lowercase :
def __init__( self , _a = 512 , ) -> List[str]:
_A : List[str] = max_chunk_size
_A : Optional[int] = None
_A : Optional[tuple] = None
def a__ ( self , _a , _a , _a ) -> int:
logging.info("""Tuning chunk size...""" )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
_A : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
_A : List[Any] = [c for c in candidates if c > min_chunk_size]
_A : Dict = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(_a ) -> bool:
try:
with torch.no_grad():
fn(*_a , chunk_size=_a )
return True
except RuntimeError:
return False
_A : Union[str, Any] = 0
_A : Optional[int] = len(_a ) - 1
while i > min_viable_chunk_size_index:
_A : str = test_chunk_size(candidates[i] )
if not viable:
_A : List[Any] = (min_viable_chunk_size_index + i) // 2
else:
_A : Tuple = i
_A : Union[str, Any] = (i + len(_a ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def a__ ( self , _a , _a ) -> bool:
_A : List[Any] = True
for aa, aa in zip(_a , _a ):
assert type(_a ) == type(_a )
if isinstance(_a , (list, tuple) ):
consistent &= self._compare_arg_caches(_a , _a )
elif isinstance(_a , _a ):
_A : Optional[Any] = [v for _, v in sorted(aa.items() , key=lambda _a : x[0] )]
_A : Optional[int] = [v for _, v in sorted(aa.items() , key=lambda _a : x[0] )]
consistent &= self._compare_arg_caches(_a , _a )
else:
consistent &= aa == aa
return consistent
def a__ ( self , _a , _a , _a , ) -> int:
_A : Union[str, Any] = True
_A : tuple = tree_map(lambda _a : a.shape if isinstance(_a , torch.Tensor ) else a , _a , _a )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(_a )
_A : Optional[int] = self._compare_arg_caches(self.cached_arg_data , _a )
else:
# Otherwise, we can reuse the precomputed value
_A : Union[str, Any] = False
if not consistent:
_A : Union[str, Any] = self._determine_favorable_chunk_size(
_a , _a , _a , )
_A : str = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
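

# Usage sketch (illustrative; assumes the chunking entry point above is the
# upstream `chunk_layer` and the class is the upstream `ChunkSizeTuner`):
#   out = chunk_layer(
#       layer=my_module,            # hypothetical callable taking keyword tensors
#       inputs={"x": big_tensor},   # the leading `no_batch_dims` dims get chunked
#       chunk_size=256,
#       no_batch_dims=1,
#   )
# The tuner binary-searches the largest power-of-two chunk size that runs
# without raising a RuntimeError (e.g. CUDA OOM) and caches it for reuse while
# the argument shapes stay the same.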
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
def __init__( self , _a , _a=3 , _a=32 , _a=3 , _a=10 , _a=[10, 20, 30, 40] , _a=[1, 1, 2, 1] , _a=True , _a=True , _a="relu" , _a=3 , _a=None , ) -> Union[str, Any]:
_A : List[str] = parent
_A : Optional[int] = batch_size
_A : int = image_size
_A : Optional[Any] = num_channels
_A : Any = embeddings_size
_A : Dict = hidden_sizes
_A : Any = depths
_A : List[Any] = is_training
_A : Optional[Any] = use_labels
_A : Tuple = hidden_act
_A : Dict = num_labels
_A : Union[str, Any] = scope
_A : Optional[Any] = len(_a )
def a__ ( self ) -> Dict:
_A : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : List[str] = None
if self.use_labels:
_A : Tuple = ids_tensor([self.batch_size] , self.num_labels )
_A : Any = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> List[str]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def a__ ( self , _a , _a , _a ) -> Optional[int]:
_A : Any = RegNetModel(config=_a )
model.to(_a )
model.eval()
_A : Union[str, Any] = model(_a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self , _a , _a , _a ) -> Optional[int]:
_A : str = self.num_labels
_A : Any = RegNetForImageClassification(_a )
model.to(_a )
model.eval()
_A : str = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self ) -> str:
_A : Union[str, Any] = self.prepare_config_and_inputs()
_A , _A , _A : Tuple = config_and_inputs
_A : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
_a = (
{"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Union[str, Any]:
_A : Optional[int] = RegNetModelTester(self )
_A : Tuple = ConfigTester(self , config_class=_a , has_text_modality=_a )
def a__ ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self ) -> Optional[Any]:
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def a__ ( self ) -> Optional[int]:
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def a__ ( self ) -> Union[str, Any]:
pass
def a__ ( self ) -> Optional[Any]:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Dict = model_class(_a )
_A : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : Dict = [*signature.parameters.keys()]
_A : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> str:
_A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Optional[Any]:
_A , _A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Union[str, Any] = model_class(config=_a )
for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def a__ ( self ) -> Optional[int]:
def check_hidden_states_output(_a , _a , _a ):
_A : str = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_A : List[str] = model(**self._prepare_for_class(_a , _a ) )
_A : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
_A , _A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Optional[Any] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_A : Union[str, Any] = layer_type
_A : Tuple = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Optional[int] = True
check_hidden_states_output(_a , _a , _a )
def a__ ( self ) -> Optional[int]:
_A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> Tuple:
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : Optional[Any] = RegNetModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
_A : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> List[Any]:
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def a__ ( self ) -> str:
_A : Optional[int] = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
_A : Any = self.default_image_processor
_A : Optional[int] = prepare_img()
_A : Tuple = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : Union[str, Any] = model(**_a )
# verify the logits
_A : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : int = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
'''simple docstring'''
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None

# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"

_logger = None


def logger():
    """Returns the logger instance used by this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None
    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
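

# Usage sketch (illustrative): the platform-appropriate class is aliased to
# `FileLock` above, so callers only need:
#   lock = FileLock("resource.txt.lock", timeout=5)
#   with lock:
#       ...  # critical section; raises Timeout if not acquired within 5 s
# Acquisition is counted per object, so nested `with lock:` blocks on the same
# lock will not deadlock.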
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
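

# Why the test above works (derivation): pentagonal numbers satisfy
# P(m) = m(3m - 1) / 2, so solving 3m^2 - m - 2n = 0 for m gives
# m = (1 + sqrt(1 + 24n)) / 6; n is pentagonal iff that m is a whole number.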
def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
if donor_conc <= 0:
raise ValueError('''Donor concentration should be positive''' )
elif acceptor_conc <= 0:
raise ValueError('''Acceptor concentration should be positive''' )
elif intrinsic_conc <= 0:
raise ValueError('''Intrinsic concentration should be positive''' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'''Donor concentration should be greater than intrinsic concentration''' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'''Acceptor concentration should be greater than intrinsic concentration''' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
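

# Worked example (illustrative, rounded): at T = 300 K the thermal voltage
# kT/q is about 0.0259 V, so for donor_conc = acceptor_conc = 1e17 and
# intrinsic_conc = 1e10 (all cm^-3):
#   builtin_voltage(1e17, 1e17, 1e10)  # -> 0.0259 * ln(1e34 / 1e20) ≈ 0.83 V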
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
lowercase__ : str = random.Random()
def __lowercase ( _a , _a=1.0 , _a=None , _a=None ):
if rng is None:
snake_case_ : Tuple = global_rng
snake_case_ : str = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class _UpperCAmelCase ( unittest.TestCase):
def __init__( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Tuple=7 , lowercase_ : Union[str, Any]=400 , lowercase_ : Tuple=2000 , lowercase_ : str=24 , lowercase_ : Any=24 , lowercase_ : str=0.0 , lowercase_ : str=16000 , lowercase_ : Any=True , lowercase_ : Tuple=True , ):
snake_case_ : Any = parent
snake_case_ : Dict = batch_size
snake_case_ : Tuple = min_seq_length
snake_case_ : List[str] = max_seq_length
snake_case_ : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
snake_case_ : Optional[Any] = feature_size
snake_case_ : Union[str, Any] = num_mel_bins
snake_case_ : List[str] = padding_value
snake_case_ : List[str] = sampling_rate
snake_case_ : str = return_attention_mask
snake_case_ : str = do_normalize
def _snake_case ( self : Any ):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _snake_case ( self : Tuple , lowercase_ : Tuple=False , lowercase_ : Optional[int]=False ):
def _flatten(lowercase_ : Union[str, Any] ):
return list(itertools.chain(*lowercase_ ) )
if equal_length:
snake_case_ : Optional[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
snake_case_ : Optional[int] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case_ : Dict = [np.asarray(lowercase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _UpperCAmelCase ( lowerCAmelCase__ , unittest.TestCase):
_lowerCAmelCase : Any = SpeechaTextFeatureExtractor if is_speech_available() else None
def _snake_case ( self : str ):
snake_case_ : Union[str, Any] = SpeechaTextFeatureExtractionTester(self )
def _snake_case ( self : List[str] , lowercase_ : Union[str, Any] ):
self.assertTrue(np.all(np.mean(lowercase_ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowercase_ , axis=0 ) - 1 ) < 1E-3 ) )
def _snake_case ( self : Optional[Any] ):
# Tests that all call wrap to encode_plus and batch_encode_plus
snake_case_ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case_ : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case_ : Any = [np.asarray(lowercase_ ) for speech_input in speech_inputs]
# Test feature size
snake_case_ : Tuple = feature_extractor(lowercase_ , padding=lowercase_ , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
snake_case_ : List[str] = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
snake_case_ : Optional[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
# Test batched
snake_case_ : List[Any] = feature_extractor(lowercase_ , return_tensors='''np''' ).input_features
snake_case_ : List[Any] = feature_extractor(lowercase_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ):
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
snake_case_ : Union[str, Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
snake_case_ : Dict = np.asarray(lowercase_ )
snake_case_ : Optional[int] = feature_extractor(lowercase_ , return_tensors='''np''' ).input_features
snake_case_ : Optional[int] = feature_extractor(lowercase_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ):
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
def _snake_case ( self : Optional[Any] ):
snake_case_ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case_ : int = ['''longest''', '''max_length''', '''do_not_pad''']
snake_case_ : List[Any] = [None, 16, None]
for max_length, padding in zip(lowercase_ , lowercase_ ):
snake_case_ : Union[str, Any] = feature_extractor(
lowercase_ , padding=lowercase_ , max_length=lowercase_ , return_attention_mask=lowercase_ )
snake_case_ : Optional[int] = inputs.input_features
snake_case_ : List[Any] = inputs.attention_mask
snake_case_ : Any = [np.sum(lowercase_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def _snake_case ( self : List[Any] ):
snake_case_ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case_ : List[Any] = ['''longest''', '''max_length''', '''do_not_pad''']
snake_case_ : Dict = [None, 16, None]
for max_length, padding in zip(lowercase_ , lowercase_ ):
snake_case_ : Dict = feature_extractor(
lowercase_ , max_length=lowercase_ , padding=lowercase_ , return_tensors='''np''' , return_attention_mask=lowercase_ )
snake_case_ : Any = inputs.input_features
snake_case_ : Optional[Any] = inputs.attention_mask
snake_case_ : List[str] = [np.sum(lowercase_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def _snake_case ( self : Optional[Any] ):
snake_case_ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case_ : Dict = feature_extractor(
lowercase_ , padding='''max_length''' , max_length=4 , truncation=lowercase_ , return_tensors='''np''' , return_attention_mask=lowercase_ , )
snake_case_ : int = inputs.input_features
snake_case_ : Any = inputs.attention_mask
snake_case_ : List[Any] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def _snake_case ( self : int ):
snake_case_ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case_ : List[str] = feature_extractor(
lowercase_ , padding='''longest''' , max_length=4 , truncation=lowercase_ , return_tensors='''np''' , return_attention_mask=lowercase_ , )
snake_case_ : Union[str, Any] = inputs.input_features
snake_case_ : Any = inputs.attention_mask
snake_case_ : Optional[int] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
snake_case_ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case_ : str = feature_extractor(
lowercase_ , padding='''longest''' , max_length=16 , truncation=lowercase_ , return_tensors='''np''' , return_attention_mask=lowercase_ , )
snake_case_ : Optional[int] = inputs.input_features
snake_case_ : Optional[int] = inputs.attention_mask
snake_case_ : Union[str, Any] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
def _snake_case ( self : List[str] , lowercase_ : List[str] ):
from datasets import load_dataset
snake_case_ : str = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
snake_case_ : int = ds.sort('''id''' ).select(range(lowercase_ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def _snake_case ( self : str ):
# fmt: off
snake_case_ : List[Any] = np.array([
-1.57_45, -1.77_13, -1.70_20, -1.60_69, -1.22_50, -1.11_05, -0.90_72, -0.82_41,
-1.23_10, -0.80_98, -0.33_20, -0.41_01, -0.79_85, -0.49_96, -0.82_13, -0.91_28,
-1.04_20, -1.12_86, -1.04_40, -0.79_99, -0.84_05, -1.22_75, -1.54_43, -1.46_25,
] )
# fmt: on
snake_case_ : Tuple = self._load_datasamples(1 )
snake_case_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ : Tuple = feature_extractor(lowercase_ , return_tensors='''pt''' ).input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
self.assertTrue(np.allclose(input_features[0, 0, :30] , lowercase_ , atol=1E-4 ) )
import operator as op
SCREAMING_SNAKE_CASE__ = '''scaler.pt'''
SCREAMING_SNAKE_CASE__ = '''pytorch_model'''
SCREAMING_SNAKE_CASE__ = '''random_states'''
SCREAMING_SNAKE_CASE__ = '''optimizer'''
SCREAMING_SNAKE_CASE__ = '''scheduler'''
SCREAMING_SNAKE_CASE__ = '''pytorch_model.bin'''
SCREAMING_SNAKE_CASE__ = '''pytorch_model.bin.index.json'''
SCREAMING_SNAKE_CASE__ = '''model.safetensors'''
SCREAMING_SNAKE_CASE__ = '''model.safetensors.index.json'''
SCREAMING_SNAKE_CASE__ = '''1.10.2'''
SCREAMING_SNAKE_CASE__ = '''py38'''
SCREAMING_SNAKE_CASE__ = '''4.17.0'''
SCREAMING_SNAKE_CASE__ = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
SCREAMING_SNAKE_CASE__ = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
SCREAMING_SNAKE_CASE__ = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
SCREAMING_SNAKE_CASE__ = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
SCREAMING_SNAKE_CASE__ = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
SCREAMING_SNAKE_CASE__ = '''2.0.1'''
SCREAMING_SNAKE_CASE__ = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
SCREAMING_SNAKE_CASE__ = ['''default''', '''reduce-overhead''', '''max-autotune''']
SCREAMING_SNAKE_CASE__ = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
SCREAMING_SNAKE_CASE__ = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
SCREAMING_SNAKE_CASE__ = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
SCREAMING_SNAKE_CASE__ = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 128022
FR_CODE = 128028


@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__a : Tuple = {'input_ids': [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        # `__a` is the expected-encoding dict built above
        self.tokenizer_integration_test_util(
            expected_encoding=__a, model_name="facebook/m2m100_418M", revision="c168bae485c864188cf9aa0e4108b0b6934dc91e"
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]

    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)
    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)
    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_lang_token_to_id)
    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        # shift_tokens_right prepends the decoder start token (EOS here) and shifts the labels right
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
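# Illustrative end-to-end use of the tokenizer exercised above (a sketch, not part of the test file):
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   model_inputs = tokenizer("A test", return_tensors="pt")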
| 577
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]
    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)
    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )
    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
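# Illustrative standalone usage of the pipeline under test (model name taken from the tests above):
#   classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
#   classifier(np.ones((8000,)), top_k=4)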
| 560
|
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """
    Stores two signals and computes their circular convolution
    by the circulant-matrix method.
    """

    def __init__(self):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self):
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # row i of the matrix holds the second signal rotated by i positions
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
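# Illustrative result for the default signals above (matches the algorithm's known doctest):
#   CircularConvolution().circular_convolution() -> [10.0, 10.0, 6.0, 14.0]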
| 430
| 0
|
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None):
    """Download a translation dataset with the `datasets` package and write one
    plain-text source/target file pair per split."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split

        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
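# Example invocation via python-fire (the script filename is an assumption):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en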
| 712
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
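# Each class below is a placeholder for a torch-backed symbol: constructing it or
# calling either of its classmethods raises an informative error via
# `requires_backends` when torch is not installed.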
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : List[str] , **__a : Union[str, Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : List[str] , **__a : int ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : int , **__a : List[Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Optional[Any] , **__a : Tuple ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Tuple , **__a : Optional[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : str , **__a : Tuple ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : List[str] , **__a : int ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : List[str] , **__a : Optional[int] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : Any ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : Dict , **__a : List[str] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[str] , **__a : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Optional[int] , **__a : Optional[int] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[str] , **__a : List[str] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Optional[int] , **__a : Dict ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Any ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[Any] , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Optional[int] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : str ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Any ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : str , **__a : Tuple ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[str] , **__a : Optional[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Optional[int] , **__a : Union[str, Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Union[str, Any] , **__a : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Union[str, Any] , **__a : Any ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : int , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : int ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Tuple , **__a : str ) -> Dict:
requires_backends(cls , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Any , **UpperCamelCase__ : List[str] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : str , **UpperCamelCase__ : str ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Dict , **UpperCamelCase__ : Dict ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Tuple , **UpperCamelCase__ : int ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Tuple ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : str , **__a : List[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : int , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : str , **__a : List[str] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : List[Any] , **__a : List[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : int ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Optional[Any] , **__a : Tuple ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : str , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : str , **__a : str ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : str ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : int , **__a : Tuple ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Tuple , **__a : str ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Dict ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : str , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : int , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : str , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : int , **__a : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Union[str, Any] , **__a : Optional[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : List[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Optional[Any] , **__a : Any ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : List[str] , **__a : Dict ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : int , **__a : Dict ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : List[str] , **__a : Union[str, Any] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : Dict ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Tuple , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : str , **__a : Any ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Dict , **__a : Optional[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Optional[Any] , **__a : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Union[str, Any] , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : Optional[int] , **__a : List[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : List[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Any , **__a : str ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : int , **__a : Optional[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Tuple , **__a : Optional[int] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : Tuple ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Union[str, Any] , **__a : List[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : List[Any] , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[Any] , *__a : Optional[int] , **__a : int ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : int , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Any , **__a : int ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Union[str, Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Dict ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[Any] , *__a : int , **__a : Optional[int] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Tuple , **__a : List[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Dict , **__a : Optional[int] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Any , **__a : Dict ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : Tuple , **__a : Optional[int] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Optional[Any] , **__a : Optional[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : Dict ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Union[str, Any] , **__a : Optional[int] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Any , **__a : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Union[str, Any] , **__a : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : List[Any] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Dict , **__a : int ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : Union[str, Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : int ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Optional[Any] , **__a : int ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[Any] , **__a : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Optional[Any] , **__a : Optional[int] ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[int] , **__a : List[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : List[str] , **__a : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Tuple , **__a : Tuple ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : Tuple ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[str] , **__a : int ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Tuple , **__a : Optional[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Any , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : int , **__a : int ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : int , **__a : str ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : List[str] , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : str , **__a : Optional[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : List[Any] , **__a : List[str] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Optional[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[Any] , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : List[Any] , **__a : Optional[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[str] , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : str , **__a : Any ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Dict , **__a : Tuple ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Any , **__a : Optional[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : int , **__a : List[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Tuple ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : int , **__a : Optional[Any] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Optional[int] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[str] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : int , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : List[str] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
| 654
| 0
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
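# Exiting with the count of offending files makes this script usable as a CI gate:
# any nonzero count fails the job, while zero offending files exits cleanly.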
| 233
|
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # gradient of the normal to the ellipse 4x^2 + y^2 = 100 at (point_x, point_y)
    normal_gradient = point_y / 4 / point_x
    # s2 = sin(2 * phi), c2 = cos(2 * phi), where phi is the angle of the normal
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(f"""{solution() = }""")
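# The loop above stops once the beam passes back out through the top aperture
# (-0.01 <= x <= 0.01 with y > 0); for the default entry point it counts 354
# reflections, Project Euler problem 144's published answer.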
| 233
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise the PyTorch model from the json config
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
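# Example invocation (paths are placeholders):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert/model.ckpt --config_file ./lxmert/config.json \
#       --pytorch_dump_path ./pytorch_model.bin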
| 720
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)
    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))
    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))
    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))
    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
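# flatten_dict, as exercised above, joins nested keys with "." by default, e.g.
#   flatten_dict({"a": {"b": 1}}) == {"a.b": 1}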
| 176
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = 10
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = [1, 2, 3, 4]
UpperCAmelCase__ : Tuple = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_lowerCamelCase , self.block_size , 0 ) , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCAmelCase__ : Tuple = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_lowerCamelCase , self.block_size , 0 ) , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCAmelCase__ : Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_lowerCamelCase , self.block_size , 0 ) , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Any = """It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this."""
UpperCAmelCase__ , UpperCAmelCase__ : Any = process_story(_lowerCamelCase )
self.assertEqual(_lowerCamelCase , [] )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = """"""
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = process_story(_lowerCamelCase )
self.assertEqual(_lowerCamelCase , [] )
self.assertEqual(_lowerCamelCase , [] )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = (
"""It was the year of Our Lord one thousand seven hundred and """
"""seventy-five\n\nSpiritual revelations were conceded to England """
"""at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
)
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = process_story(_lowerCamelCase )
UpperCAmelCase__ : List[Any] = [
"""It was the year of Our Lord one thousand seven hundred and seventy-five.""",
"""Spiritual revelations were conceded to England at that favoured period, as at this.""",
]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : List[Any] = ["""It was the best of times."""]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = torch.tensor([1, 2, 3, 4] )
UpperCAmelCase__ : str = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(_lowerCamelCase , 0 ).numpy() , expected.numpy() )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
UpperCAmelCase__ : Union[str, Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_lowerCamelCase , 23 ).numpy() , expected.numpy() )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
UpperCAmelCase__ : Any = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_lowerCamelCase , 1 ).numpy() , expected.numpy() )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Any = 101
UpperCAmelCase__ : Dict = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
UpperCAmelCase__ : Optional[Any] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
UpperCAmelCase__ : int = compute_token_type_ids(_lowerCamelCase , _lowerCamelCase )
np.testing.assert_array_equal(_lowerCamelCase , _lowerCamelCase )
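A hedged sketch of the contract these mask tests pin down (1 for real tokens, 0 at the padding id); the real `build_mask` may treat only trailing padding specially, but this satisfies all three cases above:
import torch

def build_mask_sketch(sequence: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    # 1 for real tokens, 0 wherever the padding id appears
    return (sequence != pad_token_id).long()

assert build_mask_sketch(torch.tensor([1, 2, 3, 4]), 0).tolist() == [1, 1, 1, 1]
assert build_mask_sketch(torch.tensor([1, 2, 3, 4, 23, 23, 23]), 23).tolist() == [1, 1, 1, 1, 0, 0, 0]
assert build_mask_sketch(torch.tensor([8, 2, 3, 4, 1, 1, 1]), 1).tolist() == [1, 1, 1, 1, 0, 0, 0]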
| 182
|
"""simple docstring"""
def a__ ( ) -> Union[str, Any]:
UpperCAmelCase__ : Dict = []
UpperCAmelCase__ : Tuple = 1
while len(constant ) < 1E6:
constant.append(str(i ) )
i += 1
UpperCAmelCase__ : Any = """""".join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[9_99] )
* int(constant[99_99] )
* int(constant[9_99_99] )
* int(constant[99_99_99] )
)
if __name__ == "__main__":
print(solution())
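The same Project Euler 40 computation with readable names, stopping as soon as one million digits of the Champernowne constant are available; 210 is the well-known answer:
def champernowne_product() -> int:
    digits, total, i = [], 0, 1
    while total < 1_000_000:
        s = str(i)
        digits.append(s)
        total += len(s)
        i += 1
    constant = "".join(digits)
    product = 1
    for k in (1, 10, 100, 1_000, 10_000, 100_000, 1_000_000):
        product *= int(constant[k - 1])  # d_k, 1-indexed
    return product

assert champernowne_product() == 210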
| 182
| 1
|
"""simple docstring"""
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = [1]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 0, 0, 0
UpperCAmelCase = ugly_nums[ia] * 2
UpperCAmelCase = ugly_nums[ia] * 3
UpperCAmelCase = ugly_nums[ia] * 5
for _ in range(1 , lowerCAmelCase ):
UpperCAmelCase = min(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
ugly_nums.append(lowerCAmelCase )
if next_num == next_a:
ia += 1
UpperCAmelCase = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
UpperCAmelCase = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
UpperCAmelCase = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F'{ugly_numbers(2_0_0) = }')
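The merge-of-three-pointers idea above, rewritten with readable names as a hedged sketch; the chain of `if`s (not `elif`s) is what keeps duplicates like 6 = 2*3 = 3*2 from appearing twice:
def nth_ugly_number(n: int) -> int:
    ugly = [1]
    i2 = i3 = i5 = 0
    while len(ugly) < n:
        nxt = min(ugly[i2] * 2, ugly[i3] * 3, ugly[i5] * 5)
        ugly.append(nxt)
        if nxt == ugly[i2] * 2:
            i2 += 1
        if nxt == ugly[i3] * 3:
            i3 += 1
        if nxt == ugly[i5] * 5:
            i5 += 1
    return ugly[-1]

assert nth_ugly_number(10) == 12  # 1, 2, 3, 4, 5, 6, 8, 9, 10, 12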
| 378
|
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = [0] * no_of_processes
UpperCAmelCase = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(lowerCAmelCase ):
UpperCAmelCase = burst_time[i]
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 999999999
UpperCAmelCase = 0
UpperCAmelCase = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(lowerCAmelCase ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
UpperCAmelCase = remaining_time[j]
UpperCAmelCase = j
UpperCAmelCase = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
UpperCAmelCase = remaining_time[short]
if minm == 0:
UpperCAmelCase = 999999999
if remaining_time[short] == 0:
complete += 1
UpperCAmelCase = False
# Find finish time of current process
UpperCAmelCase = increment_time + 1
# Calculate waiting time
UpperCAmelCase = finish_time - arrival_time[short]
UpperCAmelCase = finar - burst_time[short] # waiting time = (finish - arrival) - burst
if waiting_time[short] < 0:
UpperCAmelCase = 0
# Increment time
increment_time += 1
return waiting_time
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = [0] * no_of_processes
for i in range(lowerCAmelCase ):
UpperCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = 0
UpperCAmelCase = 0
for i in range(lowerCAmelCase ):
UpperCAmelCase = total_waiting_time + waiting_time[i]
UpperCAmelCase = total_turn_around_time + turn_around_time[i]
print(F'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''' )
print("""Average turn around time =""" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('''Enter how many process you want to analyze''')
lowerCAmelCase_ : Optional[Any] = int(input())
lowerCAmelCase_ : List[Any] = [0] * no_of_processes
lowerCAmelCase_ : Optional[Any] = [0] * no_of_processes
lowerCAmelCase_ : int = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = map(int, input().split())
lowerCAmelCase_ : str = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowerCAmelCase_ : str = burst_time
lowerCAmelCase_ : List[Any] = no_of_processes
lowerCAmelCase_ : int = waiting_time
lowerCAmelCase_ : int = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
lowerCAmelCase_ : List[Any] = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'''Process''',
'''BurstTime''',
'''ArrivalTime''',
'''WaitingTime''',
'''TurnAroundTime''',
],
)
# Printing the dataFrame
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs)
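A compact, non-interactive check of the shortest-remaining-time-first logic above, with illustrative fixed inputs; waiting time comes out as finish - arrival - burst per process:
def srtf_waiting_times(arrival, burst):
    n = len(burst)
    remaining = list(burst)
    time, done = 0, 0
    finish = [0] * n
    while done < n:
        ready = [j for j in range(n) if arrival[j] <= time and remaining[j] > 0]
        if not ready:
            time += 1
            continue
        j = min(ready, key=lambda k: remaining[k])  # shortest remaining time wins
        remaining[j] -= 1
        time += 1
        if remaining[j] == 0:
            done += 1
            finish[j] = time
    return [finish[j] - arrival[j] - burst[j] for j in range(n)]

assert srtf_waiting_times([0, 1, 2], [3, 1, 2]) == [1, 0, 2]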
| 378
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class A_ ( __lowercase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Tuple = "vit_mae"
def __init__( self , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu" , _A=0.0 , _A=0.0 , _A=0.02 , _A=1e-12 , _A=224 , _A=16 , _A=3 , _A=True , _A=16 , _A=512 , _A=8 , _A=2048 , _A=0.75 , _A=False , **_A , ) -> Tuple:
"""simple docstring"""
super().__init__(**_A)
_UpperCAmelCase : Tuple = hidden_size
_UpperCAmelCase : int = num_hidden_layers
_UpperCAmelCase : Union[str, Any] = num_attention_heads
_UpperCAmelCase : Dict = intermediate_size
_UpperCAmelCase : List[Any] = hidden_act
_UpperCAmelCase : List[Any] = hidden_dropout_prob
_UpperCAmelCase : List[Any] = attention_probs_dropout_prob
_UpperCAmelCase : Any = initializer_range
_UpperCAmelCase : str = layer_norm_eps
_UpperCAmelCase : Optional[int] = image_size
_UpperCAmelCase : Union[str, Any] = patch_size
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Optional[int] = qkv_bias
_UpperCAmelCase : List[Any] = decoder_num_attention_heads
_UpperCAmelCase : Optional[Any] = decoder_hidden_size
_UpperCAmelCase : Any = decoder_num_hidden_layers
_UpperCAmelCase : Optional[int] = decoder_intermediate_size
_UpperCAmelCase : Optional[int] = mask_ratio
_UpperCAmelCase : List[str] = norm_pix_loss
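A hedged usage sketch, assuming the class above ships as `ViTMAEConfig` in transformers:
from transformers import ViTMAEConfig  # assumed export name for the config above

config = ViTMAEConfig(mask_ratio=0.9)  # override one pre-training knob
print(config.model_type, config.hidden_size, config.mask_ratio)  # vit_mae 768 0.9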
| 485
|
from math import asin, atan, cos, radians, sin, sqrt, tan
SCREAMING_SNAKE_CASE = 6_37_81_37.0
SCREAMING_SNAKE_CASE = 6_35_67_52.31_42_45
SCREAMING_SNAKE_CASE = 6378137
def _lowerCamelCase ( __A : float , __A : float , __A : float , __A : float ) -> float:
_UpperCAmelCase : Any = (AXIS_A - AXIS_B) / AXIS_A
_UpperCAmelCase : str = atan((1 - flattening) * tan(radians(__A ) ) )
_UpperCAmelCase : List[Any] = atan((1 - flattening) * tan(radians(__A ) ) )
_UpperCAmelCase : Dict = radians(__A )
_UpperCAmelCase : List[str] = radians(__A )
# Equation
_UpperCAmelCase : Optional[Any] = sin((phi_a - phi_a) / 2 )
_UpperCAmelCase : Optional[Any] = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
_UpperCAmelCase : Any = sqrt(sin_sq_phi + (cos(__A ) * cos(__A ) * sin_sq_lambda) )
return 2 * RADIUS * asin(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
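A self-contained restatement of the formula above (reduced latitudes to correct for the ellipsoid, then the standard haversine step), with an illustrative San Francisco-to-Yosemite check; the ~254 km figure is approximate:
from math import asin, atan, cos, radians, sin, sqrt, tan

RADIUS = 6378137.0
FLATTENING = (6378137.0 - 6356752.314245) / 6378137.0

def geodesic_sketch(lat1, lon1, lat2, lon2):
    b_lat1 = atan((1 - FLATTENING) * tan(radians(lat1)))
    b_lat2 = atan((1 - FLATTENING) * tan(radians(lat2)))
    sin_sq_phi = sin((b_lat2 - b_lat1) / 2) ** 2
    sin_sq_lambda = sin((radians(lon2) - radians(lon1)) / 2) ** 2
    h = sqrt(sin_sq_phi + cos(b_lat1) * cos(b_lat2) * sin_sq_lambda)
    return 2 * RADIUS * asin(h)

# San Francisco to Yosemite valley, roughly 254 km
print(f"{geodesic_sketch(37.774856, -122.424227, 37.864742, -119.537521) / 1000:.0f} km")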
| 485
| 1
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__a : str = logging.get_logger(__name__)
__a : Any = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class UpperCAmelCase( snake_case_ ):
"""simple docstring"""
a : Tuple = """longformer"""
def __init__( self , lowerCamelCase = 512 , lowerCamelCase = 2 , lowerCamelCase = 1 , lowerCamelCase = 0 , lowerCamelCase = 2 , lowerCamelCase = 30522 , lowerCamelCase = 768 , lowerCamelCase = 12 , lowerCamelCase = 12 , lowerCamelCase = 3072 , lowerCamelCase = "gelu" , lowerCamelCase = 0.1 , lowerCamelCase = 0.1 , lowerCamelCase = 512 , lowerCamelCase = 2 , lowerCamelCase = 0.02 , lowerCamelCase = 1E-12 , lowerCamelCase = False , **lowerCamelCase , ) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCamelCase , **lowerCamelCase )
lowercase__ : Dict = attention_window
lowercase__ : Optional[int] = sep_token_id
lowercase__ : List[Any] = bos_token_id
lowercase__ : List[str] = eos_token_id
lowercase__ : Union[str, Any] = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Tuple = num_hidden_layers
lowercase__ : Tuple = num_attention_heads
lowercase__ : Optional[Any] = hidden_act
lowercase__ : int = intermediate_size
lowercase__ : Optional[int] = hidden_dropout_prob
lowercase__ : Optional[int] = attention_probs_dropout_prob
lowercase__ : Optional[Any] = max_position_embeddings
lowercase__ : Optional[int] = type_vocab_size
lowercase__ : Any = initializer_range
lowercase__ : Any = layer_norm_eps
lowercase__ : List[Any] = onnx_export
class UpperCAmelCase( snake_case_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase = "default" , lowerCamelCase = None ) -> Any:
"""simple docstring"""
super().__init__(lowerCamelCase , lowerCamelCase , lowerCamelCase )
lowercase__ : str = True
@property
def __a ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowercase__ : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase__ : Union[str, Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def __a ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
lowercase__ : Optional[Any] = super().outputs
if self.task == "default":
lowercase__ : Optional[Any] = {0: "batch"}
return outputs
@property
def __a ( self ) -> float:
"""simple docstring"""
return 1E-4
@property
def __a ( self ) -> int:
"""simple docstring"""
return max(super().default_onnx_opset , 14 )
def __a ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ) -> Mapping[str, Any]:
"""simple docstring"""
lowercase__ : int = super().generate_dummy_inputs(
preprocessor=lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
lowercase__ : Any = torch.zeros_like(inputs["input_ids"] )
# make every second token global
lowercase__ : List[Any] = 1
return inputs
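The strided write obscured at the end of `generate_dummy_inputs` above ("make every second token global") presumably amounts to this; a hedged illustration:
import torch

input_ids = torch.ones(2, 8, dtype=torch.long)
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, ::2] = 1  # every second token gets global attention
print(global_attention_mask[0].tolist())  # [1, 0, 1, 0, 1, 0, 1, 0]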
| 712
|
from __future__ import annotations
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> list[int]:
lowercase__ : List[str] = [True] * limit
lowercase__ : Union[str, Any] = False
lowercase__ : List[str] = False
lowercase__ : List[str] = True
for i in range(3 ,int(limit**0.5 + 1 ) ,2 ):
lowercase__ : Dict = i * 2
while index < limit:
lowercase__ : Union[str, Any] = False
lowercase__ : str = index + i
lowercase__ : Union[str, Any] = [2]
for i in range(3 ,SCREAMING_SNAKE_CASE_ ,2 ):
if is_prime[i]:
primes.append(SCREAMING_SNAKE_CASE_ )
return primes
def snake_case_ ( SCREAMING_SNAKE_CASE_ = 1_00_00_00 ) -> int:
lowercase__ : Any = prime_sieve(SCREAMING_SNAKE_CASE_ )
lowercase__ : List[str] = 0
lowercase__ : Dict = 0
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
for j in range(i + length ,len(SCREAMING_SNAKE_CASE_ ) ):
lowercase__ : Optional[Any] = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
lowercase__ : Dict = j - i
lowercase__ : Any = sol
return largest
if __name__ == "__main__":
print(f'{solution() = }')
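A small-scale, self-contained check of the consecutive-prime-sum search above (below 100 the answer is 41 = 2 + 3 + 5 + 7 + 11 + 13); names here are illustrative:
def longest_consecutive_prime_sum(ceiling: int) -> int:
    primes = [p for p in range(2, ceiling) if all(p % d for d in range(2, int(p**0.5) + 1))]
    prime_set = set(primes)
    best_len, best = 0, 0
    for i in range(len(primes)):
        total = 0
        for j in range(i, len(primes)):
            total += primes[j]
            if total >= ceiling:
                break
            if total in prime_set and j - i + 1 > best_len:
                best_len, best = j - i + 1, total
    return best

assert longest_consecutive_prime_sum(100) == 41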
| 298
| 0
|
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
assert column_title.isupper()
__a = 0
__a = len(_SCREAMING_SNAKE_CASE ) - 1
__a = 0
while index >= 0:
__a = (ord(column_title[index] ) - 64) * pow(26 , _SCREAMING_SNAKE_CASE )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
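A hedged restatement of the mapping computed above: Excel column titles are base-26 with A=1. The left-to-right Horner form below is equivalent to the right-to-left power loop:
def title_to_number(title: str) -> int:
    number = 0
    for ch in title:
        number = number * 26 + (ord(ch) - 64)  # 'A' -> 1 ... 'Z' -> 26
    return number

assert [title_to_number(t) for t in ("A", "Z", "AA", "ZZ")] == [1, 26, 27, 702]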
| 225
|
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list ):
"""simple docstring"""
__a = len(_SCREAMING_SNAKE_CASE )
for _ in range(_SCREAMING_SNAKE_CASE ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
__a , __a = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
lowerCamelCase__ = list(range(10, 0, -1))
print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 225
| 1
|
'''simple docstring'''
from __future__ import annotations
def __UpperCamelCase ( _lowercase ) -> Optional[int]:
_lowercase : Union[str, Any] = str(_lowercase )
return n == n[::-1]
def __UpperCamelCase ( _lowercase = 100_0000 ) -> Any:
_lowercase : int = 0
for i in range(1, _lowercase ):
if is_palindrome(i ) and is_palindrome(bin(i ).split('b' )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
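A quick check of the double-base palindrome predicate above on small inputs; 585 = 0b1001001001 is the classic two-base palindrome, and the sum of all such numbers below 10 is 1 + 3 + 5 + 7 + 9 = 25:
def is_pal(s: str) -> bool:
    return s == s[::-1]

assert is_pal(str(585)) and is_pal(bin(585)[2:])
assert sum(i for i in range(1, 10) if is_pal(str(i)) and is_pal(bin(i)[2:])) == 25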
| 710
|
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __UpperCamelCase ( _lowercase ) -> None:
_lowercase , _lowercase : List[Any] = analyze_text(_lowercase )
_lowercase : Any = list(' ' + ascii_lowercase )
# what is our total sum of probabilities.
_lowercase : Union[str, Any] = sum(single_char_strings.values() )
# one length string
_lowercase : Union[str, Any] = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
_lowercase : Any = single_char_strings[ch]
_lowercase : int = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
_lowercase : str = sum(two_char_strings.values() )
_lowercase : str = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
_lowercase : Optional[Any] = cha + cha
if sequence in two_char_strings:
_lowercase : int = two_char_strings[sequence]
_lowercase : Optional[int] = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def __UpperCamelCase ( _lowercase ) -> tuple[dict, dict]:
_lowercase : Optional[Any] = Counter() # type: ignore
_lowercase : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0, len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __UpperCamelCase ( ) -> List[Any]:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
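The first-order term above is plain Shannon entropy, H = -sum(p * log2(p)); a toy check (an even two-symbol source carries exactly one bit per symbol):
import math

def entropy(probabilities):
    return -sum(p * math.log2(p) for p in probabilities if p > 0)

assert abs(entropy([0.5, 0.5]) - 1.0) < 1e-12
assert entropy([1.0]) == 0.0  # a certain outcome carries no information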
| 4
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__snake_case = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''DPTFeatureExtractor''']
__snake_case = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 189
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def a ( __a = "AAPL" ) -> str:
'''simple docstring'''
UpperCamelCase__ :str = f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
UpperCamelCase__ :Tuple = BeautifulSoup(requests.get(__a ).text , '''html.parser''' )
UpperCamelCase__ :Tuple = '''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 189
| 1
|
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class A ( _UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase = 42
lowerCamelCase = jnp.floataa
lowerCamelCase = True
def snake_case__ ( self : int )-> Tuple:
'''simple docstring'''
super().setup()
A__ = nn.Dense(5,dtype=self.dtype )
def __call__( self : str,*lowercase_ : str,**lowercase_ : Optional[int] )-> List[str]:
'''simple docstring'''
A__ = super().__call__(*lowercase_,**lowercase_ )
A__ = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class A ( _UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase = FlaxBigBirdForNaturalQuestionsModule
def _snake_case( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
'''simple docstring'''
def cross_entropy(SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str=None ):
A__ = logits.shape[-1]
A__ = (labels[..., None] == jnp.arange(SCREAMING_SNAKE_CASE__ )[None]).astype('f4' )
A__ = jax.nn.log_softmax(SCREAMING_SNAKE_CASE__ , axis=-1 )
A__ = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
A__ = reduction(SCREAMING_SNAKE_CASE__ )
return loss
A__ = partial(SCREAMING_SNAKE_CASE__ , reduction=jnp.mean )
A__ = cross_entropy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = cross_entropy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = cross_entropy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class A :
"""simple docstring"""
lowerCamelCase = 'google/bigbird-roberta-base'
lowerCamelCase = 30_00
lowerCamelCase = 1_05_00
lowerCamelCase = 1_28
lowerCamelCase = 3
lowerCamelCase = 1
lowerCamelCase = 5
# tx_args
lowerCamelCase = 3E-5
lowerCamelCase = 0.0
lowerCamelCase = 2_00_00
lowerCamelCase = 0.0_095
lowerCamelCase = 'bigbird-roberta-natural-questions'
lowerCamelCase = 'training-expt'
lowerCamelCase = 'data/nq-training.jsonl'
lowerCamelCase = 'data/nq-validation.jsonl'
def snake_case__ ( self : List[Any] )-> Union[str, Any]:
'''simple docstring'''
os.makedirs(self.base_dir,exist_ok=lowercase_ )
A__ = os.path.join(self.base_dir,self.save_dir )
A__ = self.batch_size_per_device * jax.device_count()
@dataclass
class A :
"""simple docstring"""
lowerCamelCase = 42
lowerCamelCase = 40_96 # no dynamic padding on TPUs
def __call__( self : int,lowercase_ : List[str] )-> Dict:
'''simple docstring'''
A__ = self.collate_fn(lowercase_ )
A__ = jax.tree_util.tree_map(lowercase_,lowercase_ )
return batch
def snake_case__ ( self : Union[str, Any],lowercase_ : List[Any] )-> int:
'''simple docstring'''
A__ , A__ = self.fetch_inputs(features['input_ids'] )
A__ = {
'input_ids': jnp.array(lowercase_,dtype=jnp.intaa ),
'attention_mask': jnp.array(lowercase_,dtype=jnp.intaa ),
'start_labels': jnp.array(features['start_token'],dtype=jnp.intaa ),
'end_labels': jnp.array(features['end_token'],dtype=jnp.intaa ),
'pooled_labels': jnp.array(features['category'],dtype=jnp.intaa ),
}
return batch
def snake_case__ ( self : Any,lowercase_ : list )-> Optional[int]:
'''simple docstring'''
A__ = [self._fetch_inputs(lowercase_ ) for ids in input_ids]
return zip(*lowercase_ )
def snake_case__ ( self : int,lowercase_ : list )-> int:
'''simple docstring'''
A__ = [1 for _ in range(len(lowercase_ ) )]
while len(lowercase_ ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any]=None ) -> Any:
'''simple docstring'''
if seed is not None:
A__ = dataset.shuffle(seed=SCREAMING_SNAKE_CASE__ )
for i in range(len(SCREAMING_SNAKE_CASE__ ) // batch_size ):
A__ = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(SCREAMING_SNAKE_CASE__ )
@partial(jax.pmap , axis_name='batch' )
def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
def loss_fn(SCREAMING_SNAKE_CASE__ : str ):
A__ = model_inputs.pop('start_labels' )
A__ = model_inputs.pop('end_labels' )
A__ = model_inputs.pop('pooled_labels' )
A__ = state.apply_fn(**SCREAMING_SNAKE_CASE__ , params=SCREAMING_SNAKE_CASE__ , dropout_rng=SCREAMING_SNAKE_CASE__ , train=SCREAMING_SNAKE_CASE__ )
A__ , A__ , A__ = outputs
return state.loss_fn(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
A__ , A__ = jax.random.split(SCREAMING_SNAKE_CASE__ )
A__ = jax.value_and_grad(SCREAMING_SNAKE_CASE__ )
A__ , A__ = grad_fn(state.params )
A__ = jax.lax.pmean({'loss': loss} , axis_name='batch' )
A__ = jax.lax.pmean(SCREAMING_SNAKE_CASE__ , 'batch' )
A__ = state.apply_gradients(grads=SCREAMING_SNAKE_CASE__ )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='batch' )
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
A__ = model_inputs.pop('start_labels' )
A__ = model_inputs.pop('end_labels' )
A__ = model_inputs.pop('pooled_labels' )
A__ = state.apply_fn(**SCREAMING_SNAKE_CASE__ , params=state.params , train=SCREAMING_SNAKE_CASE__ )
A__ , A__ , A__ = outputs
A__ = state.loss_fn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = jax.lax.pmean({'loss': loss} , axis_name='batch' )
return metrics
class A ( train_state.TrainState ):
"""simple docstring"""
lowerCamelCase = struct.field(pytree_node=_UpperCAmelCase )
@dataclass
class A :
"""simple docstring"""
lowerCamelCase = 42
lowerCamelCase = 42
lowerCamelCase = 42
lowerCamelCase = 42
lowerCamelCase = 42
lowerCamelCase = 42
lowerCamelCase = None
def snake_case__ ( self : str,lowercase_ : Dict,lowercase_ : List[str],lowercase_ : Optional[Any],lowercase_ : Dict=None )-> Optional[Any]:
'''simple docstring'''
A__ = model.params
A__ = TrainState.create(
apply_fn=model.__call__,params=lowercase_,tx=lowercase_,loss_fn=lowercase_,)
if ckpt_dir is not None:
A__ , A__ , A__ , A__ , A__ = restore_checkpoint(lowercase_,lowercase_ )
A__ = {
'lr': args.lr,
'init_lr': args.init_lr,
'warmup_steps': args.warmup_steps,
'num_train_steps': num_train_steps,
'weight_decay': args.weight_decay,
}
A__ , A__ = build_tx(**lowercase_ )
A__ = train_state.TrainState(
step=lowercase_,apply_fn=model.__call__,params=lowercase_,tx=lowercase_,opt_state=lowercase_,)
A__ = args
A__ = data_collator
A__ = lr
A__ = params
A__ = jax_utils.replicate(lowercase_ )
return state
def snake_case__ ( self : Tuple,lowercase_ : Tuple,lowercase_ : Optional[int],lowercase_ : Optional[int] )-> int:
'''simple docstring'''
A__ = self.args
A__ = len(lowercase_ ) // args.batch_size
A__ = jax.random.PRNGKey(0 )
A__ = jax.random.split(lowercase_,jax.device_count() )
for epoch in range(args.max_epochs ):
A__ = jnp.array(0,dtype=jnp.floataa )
A__ = get_batched_dataset(lowercase_,args.batch_size,seed=lowercase_ )
A__ = 0
for batch in tqdm(lowercase_,total=lowercase_,desc=F'Running EPOCH-{epoch}' ):
A__ = self.data_collator(lowercase_ )
A__ , A__ , A__ = self.train_step_fn(lowercase_,lowercase_,**lowercase_ )
running_loss += jax_utils.unreplicate(metrics['loss'] )
i += 1
if i % args.logging_steps == 0:
A__ = jax_utils.unreplicate(state.step )
A__ = running_loss.item() / i
A__ = self.scheduler_fn(state_step - 1 )
A__ = self.evaluate(lowercase_,lowercase_ )
A__ = {
'step': state_step.item(),
'eval_loss': eval_loss.item(),
'tr_loss': tr_loss,
'lr': lr.item(),
}
tqdm.write(str(lowercase_ ) )
self.logger.log(lowercase_,commit=lowercase_ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F'-e{epoch}-s{i}',state=lowercase_ )
def snake_case__ ( self : str,lowercase_ : Any,lowercase_ : Dict )-> Dict:
'''simple docstring'''
A__ = get_batched_dataset(lowercase_,self.args.batch_size )
A__ = len(lowercase_ ) // self.args.batch_size
A__ = jnp.array(0,dtype=jnp.floataa )
A__ = 0
for batch in tqdm(lowercase_,total=lowercase_,desc='Evaluating ... ' ):
A__ = self.data_collator(lowercase_ )
A__ = self.val_step_fn(lowercase_,**lowercase_ )
running_loss += jax_utils.unreplicate(metrics['loss'] )
i += 1
return running_loss / i
def snake_case__ ( self : int,lowercase_ : List[str],lowercase_ : Optional[Any] )-> str:
'''simple docstring'''
A__ = jax_utils.unreplicate(lowercase_ )
print(F'SAVING CHECKPOINT IN {save_dir}',end=' ... ' )
self.model_save_fn(lowercase_,params=state.params )
with open(os.path.join(lowercase_,'opt_state.msgpack' ),'wb' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args,os.path.join(lowercase_,'args.joblib' ) )
joblib.dump(self.data_collator,os.path.join(lowercase_,'data_collator.joblib' ) )
with open(os.path.join(lowercase_,'training_state.json' ),'w' ) as f:
json.dump({'step': state.step.item()},lowercase_ )
print('DONE' )
def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]:
'''simple docstring'''
print(f'RESTORING CHECKPOINT FROM {save_dir}' , end=' ... ' )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'flax_model.msgpack' ) , 'rb' ) as f:
A__ = from_bytes(state.params , f.read() )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'opt_state.msgpack' ) , 'rb' ) as f:
A__ = from_bytes(state.opt_state , f.read() )
A__ = joblib.load(os.path.join(SCREAMING_SNAKE_CASE__ , 'args.joblib' ) )
A__ = joblib.load(os.path.join(SCREAMING_SNAKE_CASE__ , 'data_collator.joblib' ) )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'training_state.json' ) , 'r' ) as f:
A__ = json.load(SCREAMING_SNAKE_CASE__ )
A__ = training_state['step']
print('DONE' )
return params, opt_state, step, args, data_collator
def _snake_case( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A__ = num_train_steps - warmup_steps
A__ = optax.linear_schedule(init_value=SCREAMING_SNAKE_CASE__ , end_value=SCREAMING_SNAKE_CASE__ , transition_steps=SCREAMING_SNAKE_CASE__ )
A__ = optax.linear_schedule(init_value=SCREAMING_SNAKE_CASE__ , end_value=1E-7 , transition_steps=SCREAMING_SNAKE_CASE__ )
A__ = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def _snake_case( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict:
'''simple docstring'''
def weight_decay_mask(SCREAMING_SNAKE_CASE__ : Optional[int] ):
A__ = traverse_util.flatten_dict(SCREAMING_SNAKE_CASE__ )
A__ = {k: (v[-1] != 'bias' and v[-2:] != ('LayerNorm', 'scale')) for k, v in params.items()}
return traverse_util.unflatten_dict(SCREAMING_SNAKE_CASE__ )
A__ = scheduler_fn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = optax.adamw(learning_rate=SCREAMING_SNAKE_CASE__ , weight_decay=SCREAMING_SNAKE_CASE__ , mask=SCREAMING_SNAKE_CASE__ )
return tx, lr
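A hedged numpy restatement of the `cross_entropy` closure above: one-hot labels by broadcasting against `arange`, then the negative log-softmax dot product; the max-shift is only for numerical stability:
import numpy as np

def cross_entropy_sketch(logits, labels):
    vocab = logits.shape[-1]
    one_hot = (labels[..., None] == np.arange(vocab)[None]).astype("f4")
    shifted = logits - logits.max(axis=-1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
    return -(one_hot * log_probs).sum(axis=-1)

logits = np.array([[2.0, 0.0, 0.0]])
loss_right = cross_entropy_sketch(logits, np.array([0]))
loss_wrong = cross_entropy_sketch(logits, np.array([1]))
assert loss_right.item() < loss_wrong.item()  # the correct class is cheaper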
| 700
|
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def _snake_case( SCREAMING_SNAKE_CASE__ : str ) -> Tuple:
'''simple docstring'''
A__ = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> int:
'''simple docstring'''
A__ , A__ = emb.weight.shape
A__ = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
A__ = emb.weight.data
return lin_layer
def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] ) -> str:
'''simple docstring'''
A__ = torch.load(SCREAMING_SNAKE_CASE__ , map_location='cpu' )
A__ = mam_aaa['args'] or mam_aaa['cfg']['model']
A__ = mam_aaa['model']
remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
A__ = state_dict['encoder.embed_tokens.weight'].shape[0]
A__ = MaMaaaConfig(
vocab_size=SCREAMING_SNAKE_CASE__ , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
A__ = state_dict['decoder.embed_tokens.weight']
A__ = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
model.model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
A__ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
lowercase_ = parser.parse_args()
    lowercase_ = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
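The `make_linear_from_emb` trick above in isolation: the output projection is a bias-free Linear whose weight is tied to the embedding matrix:
import torch
from torch import nn

emb = nn.Embedding(10, 4)
vocab_size, emb_size = emb.weight.shape
lin = nn.Linear(emb_size, vocab_size, bias=False)
lin.weight.data = emb.weight.data  # weight tying, no copy semantics implied
x = torch.randn(2, emb_size)
assert lin(x).shape == (2, vocab_size)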
| 586
| 0
|
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCAmelCase ( lowercase , unittest.TestCase ):
lowerCamelCase_ : Optional[Any] = XLMTokenizer
lowerCamelCase_ : Union[str, Any] = False
def _snake_case ( self : str):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE_ :Tuple = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
SCREAMING_SNAKE_CASE_ :List[str] = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase))))
SCREAMING_SNAKE_CASE_ :List[str] = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
SCREAMING_SNAKE_CASE_ :str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
SCREAMING_SNAKE_CASE_ :Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w") as fp:
fp.write(json.dumps(UpperCAmelCase))
with open(self.merges_file , "w") as fp:
fp.write("\n".join(UpperCAmelCase))
def _snake_case ( self : Union[str, Any] , UpperCAmelCase : Optional[Any]):
SCREAMING_SNAKE_CASE_ :Any = "lower newer"
SCREAMING_SNAKE_CASE_ :List[str] = "lower newer"
return input_text, output_text
def _snake_case ( self : Dict):
SCREAMING_SNAKE_CASE_ :int = XLMTokenizer(self.vocab_file , self.merges_file)
SCREAMING_SNAKE_CASE_ :int = "lower"
SCREAMING_SNAKE_CASE_ :Any = ["low", "er</w>"]
SCREAMING_SNAKE_CASE_ :List[Any] = tokenizer.tokenize(UpperCAmelCase)
self.assertListEqual(UpperCAmelCase , UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :str = tokens + ["<unk>"]
SCREAMING_SNAKE_CASE_ :List[str] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase) , UpperCAmelCase)
@slow
def _snake_case ( self : Any):
SCREAMING_SNAKE_CASE_ :int = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
SCREAMING_SNAKE_CASE_ :Any = tokenizer.encode("sequence builders" , add_special_tokens=UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Optional[int] = tokenizer.encode("multi-sequence build" , add_special_tokens=UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :List[str] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Dict = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase)
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
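The wrapping contract asserted above, restated as a hedged sketch: XLM encodes a single sequence as `[0] + ids + [1]` and a pair as `[0] + ids + [1] + ids_2 + [1]`:
def build_inputs(text, text_pair=None, bos=0, sep=1):
    ids = [bos] + text + [sep]
    if text_pair is not None:
        ids = ids + text_pair + [sep]
    return ids

assert build_inputs([5, 6]) == [0, 5, 6, 1]
assert build_inputs([5, 6], [7, 8]) == [0, 5, 6, 1, 7, 8, 1]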
| 631
|
import argparse
import os
import re
import packaging.version
SCREAMING_SNAKE_CASE__ = "examples/"
SCREAMING_SNAKE_CASE__ = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
SCREAMING_SNAKE_CASE__ = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
SCREAMING_SNAKE_CASE__ = "README.md"
def lowercase ( a , a , a ):
'''simple docstring'''
with open(a , "r" , encoding="utf-8" , newline="\n" ) as f:
SCREAMING_SNAKE_CASE_ :Dict = f.read()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Union[str, Any] = REPLACE_PATTERNS[pattern]
SCREAMING_SNAKE_CASE_ :List[str] = replace.replace("VERSION" , a )
SCREAMING_SNAKE_CASE_ :str = re_pattern.sub(a , a )
with open(a , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(a )
def lowercase ( a ):
'''simple docstring'''
for folder, directories, fnames in os.walk(a ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(a , a ) , a , pattern="examples" )
def lowercase ( a , a=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(a , a , a )
if not patch:
update_version_in_examples(a )
def lowercase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :int = "🤗 Transformers currently provides the following architectures"
SCREAMING_SNAKE_CASE_ :Any = "1. Want to contribute a new model?"
with open(a , "r" , encoding="utf-8" , newline="\n" ) as f:
SCREAMING_SNAKE_CASE_ :List[str] = f.readlines()
# Find the start of the list.
SCREAMING_SNAKE_CASE_ :int = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
SCREAMING_SNAKE_CASE_ :int = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
index += 1
with open(a , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(a )
def lowercase ( ):
'''simple docstring'''
with open(REPLACE_FILES["init"] , "r" ) as f:
SCREAMING_SNAKE_CASE_ :str = f.read()
SCREAMING_SNAKE_CASE_ :Optional[int] = REPLACE_PATTERNS["init"][0].search(a ).groups()[0]
return packaging.version.parse(a )
def lowercase ( a=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :str = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
SCREAMING_SNAKE_CASE_ :int = default_version.base_version
elif patch:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = F"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
SCREAMING_SNAKE_CASE_ :List[str] = F"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
SCREAMING_SNAKE_CASE_ :List[Any] = input(F"Which version are you releasing? [{default_version}]" )
if len(a ) == 0:
SCREAMING_SNAKE_CASE_ :Any = default_version
print(F"Updating version to {version}." )
global_version_update(a , patch=a )
if not patch:
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def lowercase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Optional[Any] = get_version()
SCREAMING_SNAKE_CASE_ :Optional[Any] = F"{current_version.major}.{current_version.minor + 1}.0.dev0"
SCREAMING_SNAKE_CASE_ :str = current_version.base_version
# Check with the user we got that right.
SCREAMING_SNAKE_CASE_ :Any = input(F"Which version are we developing now? [{dev_version}]" )
if len(a ) == 0:
SCREAMING_SNAKE_CASE_ :Optional[Any] = dev_version
print(F"Updating version to {version}." )
global_version_update(a )
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 631
| 1
|
"""simple docstring"""
from __future__ import annotations
a : List[Any] = list[tuple[int, int]]
a : str = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
a : Optional[Any] = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> int:
a : Union[str, Any] = pos_x
a : List[str] = pos_y
a : List[Any] = (pos_y, pos_x)
a : List[str] = goal_x
a : str = goal_y
a : List[Any] = g_cost
a : Any = parent
a : Tuple = self.calculate_heuristic()
def __a ( self ) -> float:
a : Optional[Any] = abs(self.pos_x - self.goal_x )
a : List[Any] = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , lowerCAmelCase__ ) -> bool:
return self.f_cost < other.f_cost
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
a : Dict = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCAmelCase__ )
a : List[str] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , lowerCAmelCase__ )
a : Any = [self.start]
a : str = []
a : str = False
def __a ( self ) -> Path | None:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
a : Any = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
a : Optional[Any] = True
return self.retrace_path(lowerCAmelCase__ )
self.closed_nodes.append(lowerCAmelCase__ )
a : List[str] = self.get_successors(lowerCAmelCase__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowerCAmelCase__ )
else:
# retrieve the best current path
a : List[str] = self.open_nodes.pop(self.open_nodes.index(lowerCAmelCase__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowerCAmelCase__ )
else:
self.open_nodes.append(lowerCAmelCase__ )
if not self.reached:
return [self.start.pos]
return None
def __a ( self , lowerCAmelCase__ ) -> list[Node]:
a : str = []
for action in delta:
a : Dict = parent.pos_x + action[1]
a : List[Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowerCAmelCase__ , lowerCAmelCase__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCAmelCase__ , ) )
return successors
def __a ( self , lowerCAmelCase__ ) -> Path:
a : str = node
a : Union[str, Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
a : List[Any] = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
a : Dict = (0, 0)
a : Optional[int] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('''------''')
a : List[str] = GreedyBestFirst(init, goal)
a : Any = greedy_bf.search()
if path:
for pos_x, pos_y in path:
a : str = 2
for elem in grid:
print(elem)
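The heuristic in `calculate_heuristic` above is plain Manhattan distance; a two-line check:
def manhattan(pos: tuple[int, int], goal: tuple[int, int]) -> int:
    return abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])

assert manhattan((0, 0), (6, 6)) == 12
assert manhattan((2, 3), (2, 3)) == 0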
| 716
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[int] , _lowercase : Optional[Any] , _lowercase : Union[str, Any] ) ->Dict:
'''simple docstring'''
a : List[str] = 0
if start < end:
a : Tuple = randint(_lowercase , _lowercase )
a : List[str] = a[end]
a : str = a[pivot]
a : Optional[int] = temp
a, a : Dict = _in_place_partition(_lowercase , _lowercase , _lowercase )
count += _in_place_quick_sort(_lowercase , _lowercase , p - 1 )
count += _in_place_quick_sort(_lowercase , p + 1 , _lowercase )
return count
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] , _lowercase : str , _lowercase : List[Any] ) ->str:
'''simple docstring'''
a : Union[str, Any] = 0
a : List[Any] = randint(_lowercase , _lowercase )
a : int = a[end]
a : List[str] = a[pivot]
a : Tuple = temp
a : Union[str, Any] = start - 1
for index in range(_lowercase , _lowercase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
a : List[str] = new_pivot_index + 1
a : Optional[int] = a[new_pivot_index]
a : Union[str, Any] = a[index]
a : List[Any] = temp
a : Tuple = a[new_pivot_index + 1]
a : str = a[end]
a : Dict = temp
return new_pivot_index + 1, count
a : int = TemporaryFile()
a : Tuple = 100 # 100 elements are to be sorted
a , a : int = 0, 1 # mean and standard deviation
a : List[Any] = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
a : int = np.load(outfile)
a : Tuple = len(M) - 1
a : Union[str, Any] = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution'''
''' is :'''
)
print(z)
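The same Lomuto-style partition as `_in_place_partition` above, but with a fixed pivot so comparison counts are reproducible (the random pivot above makes them vary run to run); a hedged sketch:
def partition_count(a, start, end):
    count, p = 0, start - 1
    for i in range(start, end):
        count += 1  # one comparison against the pivot a[end]
        if a[i] < a[end]:
            p += 1
            a[p], a[i] = a[i], a[p]
    a[p + 1], a[end] = a[end], a[p + 1]  # place the pivot
    return p + 1, count

data = [3, 1, 2]
pivot_pos, comparisons = partition_count(data, 0, len(data) - 1)
assert data == [1, 2, 3] and pivot_pos == 1 and comparisons == 2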
| 31
| 0
|
'''simple docstring'''
UpperCamelCase_ = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
def lowercase__( __UpperCamelCase: dict ,__UpperCamelCase: Dict ,__UpperCamelCase: List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = set()
# keep track of all the paths to be checked
SCREAMING_SNAKE_CASE : Any = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
SCREAMING_SNAKE_CASE : List[Any] = queue.pop(0 )
# get the last node from the path
SCREAMING_SNAKE_CASE : Optional[Any] = path[-1]
if node not in explored:
SCREAMING_SNAKE_CASE : int = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
SCREAMING_SNAKE_CASE : Tuple = list(__UpperCamelCase )
new_path.append(__UpperCamelCase )
queue.append(__UpperCamelCase )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(__UpperCamelCase )
# in case there's no path between the 2 nodes
return []
def lowercase__( __UpperCamelCase: dict ,__UpperCamelCase: Dict ,__UpperCamelCase: List[str] ):
"""simple docstring"""
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
SCREAMING_SNAKE_CASE : str = [start]
SCREAMING_SNAKE_CASE : List[Any] = set(__UpperCamelCase )
# Keep tab on distances from `start` node.
SCREAMING_SNAKE_CASE : int = {start: 0, target: -1}
while queue:
SCREAMING_SNAKE_CASE : Optional[Any] = queue.pop(0 )
if node == target:
SCREAMING_SNAKE_CASE : Optional[Any] = (
dist[node] if dist[target] == -1 else min(dist[target] ,dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(__UpperCamelCase )
queue.append(__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
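A hedged deque-based check against the demo graph above; G -> C -> A -> B -> D has four edges, matching the second print:
from collections import deque

def bfs_distance(graph, start, target):
    if start == target:
        return 0
    seen, queue = {start}, deque([(start, 0)])
    while queue:
        node, d = queue.popleft()
        for nxt in graph[node]:
            if nxt == target:
                return d + 1
            if nxt not in seen:
                seen.add(nxt)
                queue.append((nxt, d + 1))
    return -1

graph = {"A": ["B", "C", "E"], "B": ["A", "D", "E"], "C": ["A", "F", "G"],
         "D": ["B"], "E": ["A", "B", "D"], "F": ["C"], "G": ["C"]}
assert bfs_distance(graph, "G", "D") == 4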
| 28
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def _UpperCAmelCase ( a : List[str] , a : Any=False ):
snake_case__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
snake_case__ = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def _UpperCAmelCase ( a : int , a : List[Any] , a : Union[str, Any]=False ):
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ = """"""
else:
snake_case__ = """deit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
snake_case__ = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case__ = in_proj_weight[
: config.hidden_size, :
]
snake_case__ = in_proj_bias[: config.hidden_size]
snake_case__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ = in_proj_bias[-config.hidden_size :]
def _UpperCAmelCase ( a : Dict , a : Union[str, Any] , a : int ):
snake_case__ = dct.pop(a )
snake_case__ = val
def _UpperCAmelCase ( ):
snake_case__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case__ = Image.open(requests.get(a , stream=a ).raw )
return im
@torch.no_grad()
def _UpperCAmelCase ( a : List[str] , a : Tuple ):
snake_case__ = DeiTConfig()
# all deit models have fine-tuned heads
snake_case__ = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
snake_case__ = 1000
snake_case__ = """huggingface/label-files"""
snake_case__ = """imagenet-1k-id2label.json"""
snake_case__ = json.load(open(hf_hub_download(a , a , repo_type="""dataset""" ) , """r""" ) )
snake_case__ = {int(a ): v for k, v in idalabel.items()}
snake_case__ = idalabel
snake_case__ = {v: k for k, v in idalabel.items()}
snake_case__ = int(deit_name[-6:-4] )
snake_case__ = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("""tiny""" ):
snake_case__ = 192
snake_case__ = 768
snake_case__ = 12
snake_case__ = 3
elif deit_name[9:].startswith("""small""" ):
snake_case__ = 384
snake_case__ = 1536
snake_case__ = 12
snake_case__ = 6
if deit_name[9:].startswith("""base""" ):
pass
elif deit_name[4:].startswith("""large""" ):
snake_case__ = 1024
snake_case__ = 4096
snake_case__ = 24
snake_case__ = 16
# load original model from timm
snake_case__ = timm.create_model(a , pretrained=a )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case__ = timm_model.state_dict()
snake_case__ = create_rename_keys(a , a )
for src, dest in rename_keys:
rename_key(a , a , a )
read_in_q_k_v(a , a , a )
# load HuggingFace model
snake_case__ = DeiTForImageClassificationWithTeacher(a ).eval()
model.load_state_dict(a )
# Check outputs on an image, prepared by DeiTImageProcessor
snake_case__ = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
snake_case__ = DeiTImageProcessor(size=a , crop_size=config.image_size )
snake_case__ = image_processor(images=prepare_img() , return_tensors="""pt""" )
snake_case__ = encoding["""pixel_values"""]
snake_case__ = model(a )
snake_case__ = timm_model(a )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(a , outputs.logits , atol=1e-3 )
Path(a ).mkdir(exist_ok=a )
print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(a )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(a )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a__ = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
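The q/k/v split in `read_in_q_k_v` above, in isolation: timm fuses query, key and value into one `(3*hidden, hidden)` matrix, and the conversion slices it into three row blocks:
import torch

hidden = 4
qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q, k, v = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[-hidden:]
assert q.shape == k.shape == v.shape == (hidden, hidden)
assert torch.equal(torch.cat([q, k, v]), qkv)  # slices tile the fused matrix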
| 654
| 0
|
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items in decreasing value/weight ratio."""
    index = list(range(len(weight)))
    ratio = [v / w for v, w in zip(value, weight)]
    # sort item indices by value-to-weight ratio, best first
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # take only the fraction of the item that still fits
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
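# A short usage sketch (added for illustration; values are the classic textbook
# example):
#
#   value = [60, 100, 120]
#   weight = [10, 20, 30]
#   max_value, fractions = fractional_knapsack(value, weight, capacity=50)
#   # max_value == 240.0, fractions == [1, 1, 2/3]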
| 441
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250_001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250_004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250_020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250_026, 250_001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250_004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250_001,
            },
        )
| 441
| 1
|
from collections import defaultdict
class AssignmentUsingBitmask:
    """Counts the ways to distribute N tasks among M persons using bitmask DP."""

    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
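# A short usage sketch (added for illustration): person 0 can do tasks 1 and 2,
# person 1 can do only task 2, so the single complete assignment is
# {person 0 -> task 1, person 1 -> task 2}:
#
#   print(AssignmentUsingBitmask([[1, 2], [2]], 2).count_no_of_ways([[1, 2], [2]]))  # -> 1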
| 57
|
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model, ckpt_dir, model_name):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name, session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
| 338
| 0
|
"""simple docstring"""
def triangle_number_generator():
    """Generate the triangle number series: 1, 3, 6, 10, ..."""
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count the divisors of n via its prime factorisation."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
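# A short worked example (added for illustration): 28 = 2^2 * 7, so it has
# (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28), and it is the first
# triangle number with more than five divisors.
#
#   assert count_divisors(28) == 6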
| 91
|
"""simple docstring"""
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive form of the Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
| 91
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 526
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
        encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
        activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=0, scale_embedding=False, pad_token_id=0, eos_token_id=1,
        forced_eos_token_id=1, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
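# A short usage sketch (added for illustration):
#
#   config = PegasusConfig(encoder_layers=2, decoder_layers=2, d_model=128)
#   assert config.hidden_size == 128  # the attribute map aliases d_model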
| 526
| 1
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    """Utility class containing a conversation and its history."""

    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> Dict:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 714
|
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train a new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer(self):
        pass

    # can't train a new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 409
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 210
|
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Return the JSON data for a given Open Library ID (olid)."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()
def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
    isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
        book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
print("""\n""".join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""")
| 210
| 1
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>",
                 bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False,
                 clean_up_tokenization_spaces=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token,
            eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 209
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    """Return the set of symbol pairs in a word, where a word is a tuple of
    variable-length string symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__",
                 unk_token="__unk__", pad_token="__null__", **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))

                if bigram not in self.bpe_ranks:
                    break

                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1

                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)

            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
| 209
| 1
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file,
    eval_file,
    test_file,
    tokenizer,
    label_column_id,
    max_seq_length=None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file,
        tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id), label2id=label2id, id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification", cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config, cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_dataset,
        eval_dataset=eval_dataset, compute_metrics=compute_metrics,
    )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)
    return results
if __name__ == "__main__":
main()
| 522
|
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Set each passed object to None and clear the device cache."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception: Exception) -> bool:
    """Return True if the exception looks like an out-of-memory error."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """Decorator that retries `function`, halving `batch_size` each time an
    out-of-memory-style exception is raised, until the call succeeds."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
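# A short usage sketch (added for illustration; `train` and its internals are
# hypothetical):
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # build dataloaders and run the training loop with `batch_size`
#
#   train()  # retried with 128, 64, 32, ... until it fits in memory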
| 522
| 1
|
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as a long string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def add_key_to_lexicon(lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str) -> None:
    """Add a new key to the lexicon, widening the existing codes when needed."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]
def compress_data(data_bits: str) -> str:
    """Compress a string of bits using the LZW algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the original file length (self-delimiting) to the compressed bits."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack the bit string into bytes (with a padding marker) and write them."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    """Read, compress and write the file at source_path to destination_path."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
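# A short CLI usage sketch (added for illustration; the module filename and
# both paths are hypothetical):
#
#   python lzw_compress.py source.bin destination.lzw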
| 679
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__magic_name__ = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    """Deprecated alias of BeitImageProcessor, kept for backwards compatibility."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
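# Migration sketch: the shim only forwards to the new class, so call sites can
# switch directly (the checkpoint name below is illustrative):
#
#   from transformers import BeitImageProcessor
#   processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")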
| 679
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
_lowerCAmelCase :Union[str, Any] = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    """Deprecated alias of PerceiverImageProcessor, kept for backwards compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 506
|
"""simple docstring"""
def reverse_words(input_str: str) -> str:
    """
    Reverse the word order of a sentence.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 506
| 1
|
def perfect_cube(n: int) -> bool:
    """Check whether n is a perfect cube (rounding guards against float error)."""
    val = round(n ** (1 / 3))
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 671
|
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    """A directed, weighted graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Adjacency-list graph supporting 0-1 BFS shortest paths."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                # Skip if an equal or shorter distance is already known.
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-1 BFS: zero-weight edges go to the front, unit-weight to the back.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
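if __name__ == "__main__":
    # Small demo of the classes above: the zero-weight edge 0 -> 2 makes the
    # shortest 0 -> 3 path cost 1 instead of 2.
    demo_graph = AdjacencyList(4)
    demo_graph.add_edge(0, 1, 1)
    demo_graph.add_edge(0, 2, 0)
    demo_graph.add_edge(1, 3, 1)
    demo_graph.add_edge(2, 3, 1)
    print(demo_graph.get_shortest_path(0, 3))  # 1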
| 671
| 1
|
"""simple docstring"""
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
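if __name__ == "__main__":
    # Known example for this recursive algorithm: the longest non-decreasing
    # subsequence of the list below is [10, 22, 33, 41, 60, 80].
    print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))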
| 49
|
UpperCamelCase = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
UpperCamelCase = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
UpperCamelCase = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
UpperCamelCase = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
UpperCamelCase = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
UpperCamelCase = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
UpperCamelCase = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
UpperCamelCase = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
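# These constants read as precomputed descending timestep schedules, likely
# diffusion-scheduler test fixtures. A hedged sketch of the simplest such
# grid, a uniform "leading" schedule; the irregular lists above clearly use
# more involved, solver-specific spacings:
#
#   def leading_timesteps(num_train_timesteps=1000, num_inference_steps=10):
#       step = num_train_timesteps // num_inference_steps
#       return [i * step for i in range(num_inference_steps)][::-1]
#
#   leading_timesteps()  # [900, 800, 700, 600, 500, 400, 300, 200, 100, 0]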
| 520
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
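# Why test_full_tokenizer expects ids [14, 15, 20]: applying the toy merges
# greedily turns "lower" into l o w e r</w> -> lo w e r</w> -> low e r</w>
# -> low er</w>, and in the vocab list above "low" has index 14, "er</w>"
# has index 15 and "<unk>" has index 20.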
| 687
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case : List[str] = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__snake_case : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
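# Effect of the _LazyModule pattern above (a sketch): importing the package is
# cheap because nothing heavy is loaded eagerly; the torch-backed modeling code
# is only imported when one of the gated attributes is first accessed:
#
#   import transformers.models.squeezebert as squeezebert  # fast, no torch import
#   config = squeezebert.SqueezeBertConfig()  # loads the configuration module lazily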
| 687
| 1
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
a__ : str = FlaxDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowerCamelCase__ , cache_dir=lowerCamelCase__ )
a__ : Dict = [t[-1] for t in os.walk(os.path.join(lowerCamelCase__ , os.listdir(lowerCamelCase__ )[0] , "snapshots" ) )]
a__ : Tuple = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase( self : str ):
a__, a__ : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowerCamelCase__ )
a__ : Any = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a__ : Dict = jax.random.PRNGKey(0 )
a__ : Tuple = 4
a__ : int = jax.device_count()
a__ : int = num_samples * [prompt]
a__ : Tuple = pipeline.prepare_inputs(lowerCamelCase__ )
# shard inputs and rng
a__ : str = replicate(lowerCamelCase__ )
a__ : Dict = jax.random.split(lowerCamelCase__ , lowerCamelCase__ )
a__ : Optional[int] = shard(lowerCamelCase__ )
a__ : Dict = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.151_4745 ) < 1E-3
assert np.abs(np.abs(lowerCamelCase__ , dtype=np.floataa ).sum() - 4_9947.875 ) < 5E-1
a__ : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase__ ) == num_samples
def _UpperCamelCase( self : int ):
a__, a__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=lowerCamelCase__ )
a__ : List[Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a__ : Optional[Any] = jax.random.PRNGKey(0 )
a__ : Optional[Any] = 50
a__ : Any = jax.device_count()
a__ : List[Any] = num_samples * [prompt]
a__ : Dict = pipeline.prepare_inputs(lowerCamelCase__ )
# shard inputs and rng
a__ : int = replicate(lowerCamelCase__ )
a__ : List[Any] = jax.random.split(lowerCamelCase__ , lowerCamelCase__ )
a__ : str = shard(lowerCamelCase__ )
a__ : Optional[Any] = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0565_2401) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase__ , dtype=np.floataa ).sum() - 238_3808.2) ) < 5E-1
def _UpperCamelCase( self : Optional[Any] ):
a__, a__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowerCamelCase__ )
a__ : Optional[int] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a__ : Optional[int] = jax.random.PRNGKey(0 )
a__ : int = 50
a__ : List[Any] = jax.device_count()
a__ : Optional[int] = num_samples * [prompt]
a__ : List[Any] = pipeline.prepare_inputs(lowerCamelCase__ )
# shard inputs and rng
a__ : Dict = replicate(lowerCamelCase__ )
a__ : Dict = jax.random.split(lowerCamelCase__ , lowerCamelCase__ )
a__ : Optional[int] = shard(lowerCamelCase__ )
a__ : Tuple = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase__ , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def _UpperCamelCase( self : Optional[int] ):
a__, a__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa )
a__ : List[Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a__ : List[str] = jax.random.PRNGKey(0 )
a__ : Tuple = 50
a__ : Optional[Any] = jax.device_count()
a__ : Optional[int] = num_samples * [prompt]
a__ : Optional[int] = pipeline.prepare_inputs(lowerCamelCase__ )
# shard inputs and rng
a__ : int = replicate(lowerCamelCase__ )
a__ : Union[str, Any] = jax.random.split(lowerCamelCase__ , lowerCamelCase__ )
a__ : Optional[int] = shard(lowerCamelCase__ )
a__ : Union[str, Any] = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase__ , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def _UpperCamelCase( self : List[Any] ):
a__ : Optional[int] = FlaxDDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , set_alpha_to_one=lowerCamelCase__ , steps_offset=1 , )
a__, a__ : str = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ , )
a__ : List[str] = scheduler.create_state()
a__ : Optional[int] = scheduler_state
a__ : Dict = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a__ : Optional[int] = jax.random.PRNGKey(0 )
a__ : Dict = 50
a__ : Dict = jax.device_count()
a__ : Any = num_samples * [prompt]
a__ : str = pipeline.prepare_inputs(lowerCamelCase__ )
# shard inputs and rng
a__ : Dict = replicate(lowerCamelCase__ )
a__ : Tuple = jax.random.split(lowerCamelCase__ , lowerCamelCase__ )
a__ : Tuple = shard(lowerCamelCase__ )
a__ : int = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4504_3945) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase__ , dtype=np.floataa ).sum() - 234_7693.5) ) < 5E-1
def _UpperCamelCase( self : int ):
a__ : Optional[int] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a__ : int = jax.device_count()
a__ : Union[str, Any] = num_samples * [prompt]
a__ : Optional[Any] = jax.random.split(jax.random.PRNGKey(0 ) , lowerCamelCase__ )
a__, a__ : int = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowerCamelCase__ , )
a__ : Dict = replicate(lowerCamelCase__ )
a__ : Dict = pipeline.prepare_inputs(lowerCamelCase__ )
a__ : List[str] = shard(lowerCamelCase__ )
a__ : Union[str, Any] = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
a__ : Optional[Any] = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
a__, a__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowerCamelCase__ , use_memory_efficient_attention=lowerCamelCase__ , )
a__ : Any = replicate(lowerCamelCase__ )
a__ : Union[str, Any] = pipeline.prepare_inputs(lowerCamelCase__ )
a__ : Union[str, Any] = shard(lowerCamelCase__ )
a__ : Dict = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
a__ : Dict = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
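# The data-parallel pattern repeated in every test above, in isolation
# (a hedged sketch): replicate the parameters across devices, shard the
# per-device inputs, split the RNG, then call the jitted (pmapped) pipeline:
#
#   num_devices = jax.device_count()
#   rng = jax.random.split(jax.random.PRNGKey(0), num_devices)
#   params = replicate(params)
#   prompt_ids = shard(prompt_ids)
#   images = pipeline(prompt_ids, params, rng, num_steps, jit=True).images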
| 37
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
UpperCamelCase : Optional[int] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def UpperCamelCase_ ( __a ) -> Any:
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def UpperCamelCase_ ( __a , __a , __a ) -> Any:
return max(metric_fn(__a , __a ) for gt in ground_truths )
def UpperCamelCase_ ( __a , __a , __a ) -> List[str]:
a__ : Tuple = [line.strip() for line in open(__a , "r" ).readlines()]
a__ : Tuple = []
if args.gold_data_mode == "qa":
a__ : Any = pd.read_csv(__a , sep="\t" , header=__a )
for answer_list in data[1]:
a__ : Union[str, Any] = ast.literal_eval(__a )
answers.append(__a )
else:
a__ : List[str] = [line.strip() for line in open(__a , "r" ).readlines()]
a__ : List[str] = [[reference] for reference in references]
a__ : List[str] = 0
for prediction, ground_truths in zip(__a , __a ):
total += 1
em += metric_max_over_ground_truths(__a , __a , __a )
fa += metric_max_over_ground_truths(__a , __a , __a )
a__ : Dict = 100.0 * em / total
a__ : Optional[Any] = 100.0 * fa / total
logger.info(f'''F1: {fa:.2f}''' )
logger.info(f'''EM: {em:.2f}''' )
def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]:
a__ : Optional[Any] = args.k
a__ : str = [line.strip() for line in open(__a , "r" ).readlines()]
a__ : Tuple = [line.strip() for line in open(__a , "r" ).readlines()]
a__ : Tuple = 0
for hypo, reference in zip(__a , __a ):
a__ : Any = set(hypo.split("\t" )[:k] )
a__ : Union[str, Any] = set(reference.split("\t" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
a__ : Union[str, Any] = 100.0 * em / total
logger.info(f'''Precision@{k}: {em: .2f}''' )
def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]:
def strip_title(__a ):
if title.startswith("\"" ):
a__ : Optional[Any] = title[1:]
if title.endswith("\"" ):
a__ : Union[str, Any] = title[:-1]
return title
a__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__a , return_tensors="pt" , padding=__a , truncation=__a , )["input_ids"].to(args.device )
a__ : Optional[int] = rag_model.rag.question_encoder(__a )
a__ : Union[str, Any] = question_enc_outputs[0]
a__ : Optional[int] = rag_model.retriever(
__a , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
a__ : List[Any] = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
a__ : int = []
for docs in all_docs:
a__ : Optional[int] = [strip_title(__a ) for title in docs["title"]]
provenance_strings.append("\t".join(__a ) )
return provenance_strings
def UpperCamelCase_ ( __a , __a , __a ) -> Dict:
with torch.no_grad():
a__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__a , return_tensors="pt" , padding=__a , truncation=__a )
a__ : Any = inputs_dict.input_ids.to(args.device )
a__ : Dict = inputs_dict.attention_mask.to(args.device )
a__ : Optional[int] = rag_model.generate( # rag_model overwrites generate
__a , attention_mask=__a , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__a , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
a__ : int = rag_model.retriever.generator_tokenizer.batch_decode(__a , skip_special_tokens=__a )
if args.print_predictions:
for q, a in zip(__a , __a ):
logger.info("Q: {} - A: {}".format(__a , __a ) )
return answers
def UpperCamelCase_ ( ) -> List[str]:
a__ : int = argparse.ArgumentParser()
parser.add_argument(
"--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=__a , help=(
"RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
" model_name_or_path"
) , )
parser.add_argument(
"--index_name" , default=__a , choices=["exact", "compressed", "legacy"] , type=__a , help="RAG model retriever type" , )
parser.add_argument(
"--index_path" , default=__a , type=__a , help="Path to the retrieval index" , )
parser.add_argument("--n_docs" , default=5 , type=__a , help="Number of retrieved docs" )
parser.add_argument(
"--model_name_or_path" , default=__a , type=__a , required=__a , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
parser.add_argument(
"--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=__a , help=(
"Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
" precision@k."
) , )
parser.add_argument("--k" , default=1 , type=__a , help="k for the precision@k calculation" )
parser.add_argument(
"--evaluation_set" , default=__a , type=__a , required=__a , help="Path to a file containing evaluation samples" , )
parser.add_argument(
"--gold_data_path" , default=__a , type=__a , required=__a , help="Path to a tab-separated file with gold samples" , )
parser.add_argument(
"--gold_data_mode" , default="qa" , type=__a , choices=["qa", "ans"] , help=(
"Format of the gold data file"
"qa - a single line in the following format: question [tab] answer_list"
"ans - a single line of the gold file contains the expected answer string"
) , )
parser.add_argument(
"--predictions_path" , type=__a , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
parser.add_argument(
"--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
parser.add_argument(
"--eval_batch_size" , default=8 , type=__a , help="Batch size per GPU/CPU for evaluation." , )
parser.add_argument(
"--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
parser.add_argument(
"--num_beams" , default=4 , type=__a , help="Number of beams to be used when generating answers" , )
parser.add_argument("--min_length" , default=1 , type=__a , help="Min length of the generated answers" )
parser.add_argument("--max_length" , default=50 , type=__a , help="Max length of the generated answers" )
parser.add_argument(
"--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
parser.add_argument(
"--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , )
a__ : int = parser.parse_args()
a__ : Dict = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
return args
def UpperCamelCase_ ( __a ) -> Optional[int]:
a__ : Tuple = {}
if args.model_type is None:
a__ : List[str] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("rag" ):
a__ : int = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
a__ : Tuple = args.n_docs
if args.index_name is not None:
a__ : Any = args.index_name
if args.index_path is not None:
a__ : int = args.index_path
else:
a__ : Optional[Any] = BartForConditionalGeneration
a__ : Tuple = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("Evaluate the following checkpoints: %s" , __a )
a__ : Any = get_scores if args.eval_mode == "e2e" else get_precision_at_k
a__ : Union[str, Any] = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
score_fn(__a , args.predictions_path , args.gold_data_path )
continue
logger.info("***** Running evaluation for {} *****".format(__a ) )
logger.info(" Batch size = %d" , args.eval_batch_size )
logger.info(" Predictions will be stored under {}".format(args.predictions_path ) )
if args.model_type.startswith("rag" ):
a__ : str = RagRetriever.from_pretrained(__a , **__a )
a__ : Optional[int] = model_class.from_pretrained(__a , retriever=__a , **__a )
model.retriever.init_retrieval()
else:
a__ : Dict = model_class.from_pretrained(__a , **__a )
model.to(args.device )
with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
a__ : List[Any] = []
for line in tqdm(__a ):
questions.append(line.strip() )
if len(__a ) == args.eval_batch_size:
a__ : Union[str, Any] = evaluate_batch_fn(__a , __a , __a )
preds_file.write("\n".join(__a ) + "\n" )
preds_file.flush()
a__ : Any = []
if len(__a ) > 0:
a__ : List[str] = evaluate_batch_fn(__a , __a , __a )
preds_file.write("\n".join(__a ) )
preds_file.flush()
score_fn(__a , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
UpperCamelCase : List[Any] = get_args()
main(args)
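# Hedged example invocation (the script name, model id and paths are
# placeholders; the flags come from the argparse definitions above):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-token-nq \
#       --model_type rag_token \
#       --evaluation_set test.source \
#       --gold_data_path gold_data \
#       --gold_data_mode qa \
#       --predictions_path predictions.txt \
#       --eval_mode e2e \
#       --n_docs 5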
| 37
| 1
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
UpperCamelCase__ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def UpperCAmelCase__ ( _A , _A , _A , _A , _A ):
"""simple docstring"""
for attribute in key.split('''.''' ):
a_ = getattr(_A , _A )
if weight_type is not None:
a_ = getattr(_A , _A ).shape
else:
a_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
a_ = value
elif weight_type == "weight_g":
a_ = value
elif weight_type == "weight_v":
a_ = value
elif weight_type == "bias":
a_ = value
elif weight_type == "running_mean":
a_ = value
elif weight_type == "running_var":
a_ = value
elif weight_type == "num_batches_tracked":
a_ = value
elif weight_type == "inv_freq":
a_ = value
else:
a_ = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def UpperCAmelCase__ ( _A , _A , _A ):
"""simple docstring"""
a_ = []
a_ = fairseq_model.state_dict()
a_ = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
a_ = False
if "conv_layers" in name:
load_conv_layer(
_A , _A , _A , _A , hf_model.config.feat_extract_norm == '''group''' , )
a_ = True
else:
for key, mapped_key in MAPPING.items():
a_ = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
a_ = True
if "*" in mapped_key:
a_ = name.split(_A )[0].split('''.''' )[-2]
a_ = mapped_key.replace('''*''' , _A )
if "pos_bias_u" in name:
a_ = None
elif "pos_bias_v" in name:
a_ = None
elif "weight_g" in name:
a_ = '''weight_g'''
elif "weight_v" in name:
a_ = '''weight_v'''
elif "bias" in name:
a_ = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a_ = '''weight'''
elif "running_mean" in name:
a_ = '''running_mean'''
elif "inv_freq" in name:
a_ = '''inv_freq'''
elif "running_var" in name:
a_ = '''running_var'''
elif "num_batches_tracked" in name:
a_ = '''num_batches_tracked'''
else:
a_ = None
set_recursively(_A , _A , _A , _A , _A )
continue
if not is_used:
unused_weights.append(_A )
logger.warning(f"Unused weights: {unused_weights}" )
def UpperCAmelCase__ ( _A , _A , _A , _A , _A ):
"""simple docstring"""
a_ = full_name.split('''conv_layers.''' )[-1]
a_ = name.split('''.''' )
a_ = int(items[0] )
a_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
a_ = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
a_ = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
a_ = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
a_ = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_A )
@torch.no_grad()
def UpperCAmelCase__ ( _A , _A , _A=None , _A=None , _A=True ):
"""simple docstring"""
if config_path is not None:
a_ = WavaVecaConformerConfig.from_pretrained(_A , hidden_act='''swish''' )
else:
a_ = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
a_ = '''rotary'''
if is_finetuned:
if dict_path:
a_ = Dictionary.load(_A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a_ = target_dict.pad_index
a_ = target_dict.bos_index
a_ = target_dict.eos_index
a_ = len(target_dict.symbols )
a_ = os.path.join(_A , '''vocab.json''' )
if not os.path.isdir(_A ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_A ) )
return
os.makedirs(_A , exist_ok=_A )
a_ = target_dict.indices
# fairseq has the <pad> and <s> switched
a_ = 0
a_ = 1
with open(_A , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(_A , _A )
a_ = WavaVecaCTCTokenizer(
_A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_A , )
a_ = True if config.feat_extract_norm == '''layer''' else False
a_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_A , return_attention_mask=_A , )
a_ = WavaVecaProcessor(feature_extractor=_A , tokenizer=_A )
processor.save_pretrained(_A )
a_ = WavaVecaConformerForCTC(_A )
else:
a_ = WavaVecaConformerForPreTraining(_A )
if is_finetuned:
a_ , a_ , a_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
a_ = argparse.Namespace(task='''audio_pretraining''' )
a_ = fairseq.tasks.setup_task(_A )
a_ , a_ , a_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_A )
a_ = model[0].eval()
recursively_load_weights(_A , _A , not is_finetuned )
hf_wavavec.save_pretrained(_A )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
UpperCamelCase__ = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
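# Hedged example invocation (the script name and all paths are placeholders;
# the flags come from the argparse definitions above):
#
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/hf_output \
#       --config_path /path/to/config.json \
#       --not_finetuned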
| 143
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class __lowercase ( unittest.TestCase ):
def __init__( self : Union[str, Any] , lowercase__ : Union[str, Any] , lowercase__ : Any=7 , lowercase__ : Any=3 , lowercase__ : str=3_0 , lowercase__ : int=4_0_0 , lowercase__ : Dict=True , lowercase__ : Union[str, Any]=None , lowercase__ : str=True , lowercase__ : Dict=1 / 2_5_5 , lowercase__ : List[Any]=True , lowercase__ : Dict=[0.5, 0.5, 0.5] , lowercase__ : List[str]=[0.5, 0.5, 0.5] , lowercase__ : List[str]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
a_ = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3}
a_ = parent
a_ = batch_size
a_ = num_channels
a_ = min_resolution
a_ = max_resolution
a_ = do_resize
a_ = size
a_ = do_rescale
a_ = rescale_factor
a_ = do_normalize
a_ = image_mean
a_ = image_std
a_ = do_pad
def __magic_name__ ( self : int ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __magic_name__ ( self : Tuple , lowercase__ : Optional[int] , lowercase__ : Optional[int]=False ):
if not batched:
a_ = image_inputs[0]
if isinstance(lowercase__ , Image.Image ):
a_ , a_ = image.size
else:
a_ , a_ = image.shape[1], image.shape[2]
if w < h:
a_ = int(self.size['''shortest_edge'''] * h / w )
a_ = self.size['''shortest_edge''']
elif w > h:
a_ = self.size['''shortest_edge''']
a_ = int(self.size['''shortest_edge'''] * w / h )
else:
a_ = self.size['''shortest_edge''']
a_ = self.size['''shortest_edge''']
else:
a_ = []
for image in image_inputs:
a_ , a_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a_ = max(lowercase__ , key=lambda lowercase__ : item[0] )[0]
a_ = max(lowercase__ , key=lambda lowercase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __lowercase ( a__ , unittest.TestCase ):
_lowerCAmelCase = DetrImageProcessor if is_vision_available() else None
def __magic_name__ ( self : Dict ):
a_ = DetrImageProcessingTester(self )
@property
def __magic_name__ ( self : Tuple ):
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ ( self : Optional[int] ):
a_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase__ , '''image_mean''' ) )
self.assertTrue(hasattr(lowercase__ , '''image_std''' ) )
self.assertTrue(hasattr(lowercase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowercase__ , '''do_rescale''' ) )
self.assertTrue(hasattr(lowercase__ , '''rescale_factor''' ) )
self.assertTrue(hasattr(lowercase__ , '''do_resize''' ) )
self.assertTrue(hasattr(lowercase__ , '''size''' ) )
self.assertTrue(hasattr(lowercase__ , '''do_pad''' ) )
def __magic_name__ ( self : Any ):
a_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , lowercase__ )
a_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=lowercase__ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2, '''longest_edge''': 8_4} )
self.assertEqual(image_processor.do_pad , lowercase__ )
def __magic_name__ ( self : str ):
pass
def __magic_name__ ( self : List[Any] ):
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ , Image.Image )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(lowercase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ , a_ = self.image_processor_tester.get_expected_values(lowercase__ , batched=lowercase__ )
a_ = image_processing(lowercase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__ ( self : Optional[int] ):
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , numpify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ , np.ndarray )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(lowercase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ = image_processing(lowercase__ , return_tensors='''pt''' ).pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(lowercase__ , batched=lowercase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__ ( self : List[str] ):
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , torchify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ , torch.Tensor )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(lowercase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ = image_processing(lowercase__ , return_tensors='''pt''' ).pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(lowercase__ , batched=lowercase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __magic_name__ ( self : Dict ):
# prepare image and target
a_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
a_ = json.loads(f.read() )
a_ = {'''image_id''': 3_9_7_6_9, '''annotations''': target}
# encode them
a_ = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50''' )
a_ = image_processing(images=lowercase__ , annotations=lowercase__ , return_tensors='''pt''' )
# verify pixel values
a_ = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , lowercase__ )
a_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowercase__ , atol=1e-4 ) )
# verify area
a_ = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowercase__ ) )
# verify boxes
a_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowercase__ )
a_ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowercase__ , atol=1e-3 ) )
# verify image_id
a_ = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowercase__ ) )
# verify is_crowd
a_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowercase__ ) )
# verify class_labels
a_ = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowercase__ ) )
# verify orig_size
a_ = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowercase__ ) )
# verify size
a_ = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowercase__ ) )
@slow
def __magic_name__ ( self : str ):
# prepare image, target and masks_path
a_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
a_ = json.loads(f.read() )
a_ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target}
a_ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
a_ = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50-panoptic''' )
a_ = image_processing(images=lowercase__ , annotations=lowercase__ , masks_path=lowercase__ , return_tensors='''pt''' )
# verify pixel values
a_ = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , lowercase__ )
a_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowercase__ , atol=1e-4 ) )
# verify area
a_ = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowercase__ ) )
# verify boxes
a_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowercase__ )
a_ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowercase__ , atol=1e-3 ) )
# verify image_id
a_ = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowercase__ ) )
# verify is_crowd
a_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowercase__ ) )
# verify class_labels
a_ = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowercase__ ) )
# verify masks
a_ = 8_2_2_8_7_3
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , lowercase__ )
# verify orig_size
a_ = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowercase__ ) )
# verify size
a_ = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowercase__ ) )
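# The resize rule the expected-value helper above encodes, in isolation
# (a sketch; it ignores the longest_edge cap): scale the shorter image side
# to size["shortest_edge"] and the other side proportionally:
#
#   def expected_hw(h, w, shortest_edge=18):
#       if w < h:
#           return int(shortest_edge * h / w), shortest_edge
#       if w > h:
#           return shortest_edge, int(shortest_edge * w / h)
#       return shortest_edge, shortest_edge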
| 143
| 1
|
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250_112,
        d_model=512,
        d_kv=64,
        d_ff=1_024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
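# Quick sanity check of the feed_forward_proj parsing above (a sketch):
#
#   cfg = UMT5Config(feed_forward_proj="gated-gelu")
#   cfg.is_gated_act   # True
#   cfg.dense_act_fn   # "gelu_new" (special-cased for "gated-gelu")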
| 11
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A ( UpperCamelCase_ : List[Any] ) -> Tuple:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
lowerCAmelCase__ = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
lowerCAmelCase__ = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
lowerCAmelCase__ = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
lowerCAmelCase__ = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
lowerCAmelCase__ = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
lowerCAmelCase__ = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCAmelCase__ = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
lowerCAmelCase__ = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
lowerCAmelCase__ = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
lowerCAmelCase__ = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
lowerCAmelCase__ = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCAmelCase__ = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
lowerCAmelCase__ = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
lowerCAmelCase__ = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
lowerCAmelCase__ = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
lowerCAmelCase__ = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
lowerCAmelCase__ = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
lowerCAmelCase__ = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
lowerCAmelCase__ = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
lowerCAmelCase__ = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCAmelCase__ = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
lowerCAmelCase__ = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
lowerCAmelCase__ = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
lowerCAmelCase__ = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            # (the target key names below follow the rename rules in rename_key above)
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    """Copy/paste/tweak the original GroupViT weights to our GroupViT structure."""
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)
    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
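# Example invocation (the script file name and all paths below are placeholders, not
# values taken from this file):
#   python convert_groupvit_checkpoint.py --checkpoint_path ./group_vit_checkpoint.pth \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc --model_name groupvit-gcc-yfcc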
| 48
| 0
|
def merge_sort(collection: list) -> list:
    """Sort a list in ascending order with merge sort.

    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([])
    []
    """

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
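# Note: this merge sort is stable, because the merge step prefers the left run on
# ties (left[0] <= right[0]), so equal elements keep their original relative order.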
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase_ : Tuple = input('''Enter numbers separated by a comma:\n''').strip()
lowercase_ : Union[str, Any] = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 705
|
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowercase_ : Union[str, Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
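# For example, with sample_rate=16000 and max_length=20.0 (the data default below) the
# helper keeps a random 320000-sample (20 second) window of longer clips and returns
# shorter clips unchanged.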
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, use_auth_token=True if model_args.use_auth_token else None, )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, use_auth_token=True if model_args.use_auth_token else None, )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--label_column_name` to the correct text column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path, return_attention_mask=model_args.attention_mask, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate) )
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase = raw_datasets["""train"""].features[data_args.label_column_name].names
lowercase , lowercase = {}, {}
for i, label in enumerate(lowercase_ ):
lowercase = str(lowercase_ )
lowercase = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="audio-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)
# Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=raw_datasets["train"] if training_args.do_train else None, eval_dataset=raw_datasets["eval"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=feature_extractor, )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 653
| 0
|
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
a__ : List[str] = """scheduler_config.json"""
class lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
snake_case_ = 1
snake_case_ = 2
snake_case_ = 3
snake_case_ = 4
snake_case_ = 5
@dataclass
class lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
snake_case_ = 42
class lowercase :
"""simple docstring"""
snake_case_ = SCHEDULER_CONFIG_NAME
snake_case_ = ['dtype']
snake_case_ = []
snake_case_ = True
    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, return_unused_kwargs=True, **kwargs, )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Returns all schedulers that are compatible with this scheduler."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
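# For example, per-timestep scalars broadcast over a batch of images: an x of shape
# (4,) paired with a target shape of (4, 3, 32, 32) is first reshaped to (4, 1, 1, 1)
# and then broadcast, so each sample in the batch gets its own scalar.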
def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    """Create a beta schedule that discretizes the given cosine alpha_bar function."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        # beta_i = 1 - alpha_bar(t2) / alpha_bar(t1), clipped at max_beta
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
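# Illustrative property of the cosine schedule (not a value from this file): since
# alpha_bar decays slowly near t = 0 and quickly near t = 1, the resulting betas are
# tiny for early timesteps and grow toward max_beta late, e.g.
#   betas = betas_for_alpha_bar(1000)
#   bool(betas[0] < betas[-1])  # True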
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod, )
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    # forward diffusion: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    # v-prediction target: v = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 165
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""],
"""feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""],
"""processing_wav2vec2""": ["""Wav2Vec2Processor"""],
"""tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
"""WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Wav2Vec2ForAudioFrameClassification""",
"""Wav2Vec2ForCTC""",
"""Wav2Vec2ForMaskedLM""",
"""Wav2Vec2ForPreTraining""",
"""Wav2Vec2ForSequenceClassification""",
"""Wav2Vec2ForXVector""",
"""Wav2Vec2Model""",
"""Wav2Vec2PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
"""TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWav2Vec2ForCTC""",
"""TFWav2Vec2Model""",
"""TFWav2Vec2PreTrainedModel""",
"""TFWav2Vec2ForSequenceClassification""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
"""FlaxWav2Vec2ForCTC""",
"""FlaxWav2Vec2ForPreTraining""",
"""FlaxWav2Vec2Model""",
"""FlaxWav2Vec2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
a__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 165
| 1
|
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks, )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy="steps", )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback], )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 716
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29_056,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
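# Minimal usage sketch (attribute and default values as restored above):
#   config = MegatronBertConfig(vocab_size=29056, hidden_size=1024)
#   assert config.num_attention_heads == 16  # default from the signature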
| 450
| 0
|
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
    parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')
    parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')
    parser.add_argument(
        '--out-file', '-o', metavar='eval.json', help='Write accuracy metrics to file (default is stdout).')
    parser.add_argument(
        '--na-prob-file', '-n', metavar='na_prob.json', help='Model estimates of probability of no answer.')
    parser.add_argument(
        '--na-prob-thresh', '-t', type=float, default=1.0, help='Predict "" if no-answer probability exceeds this (default = 1.0).', )
    parser.add_argument(
        '--out-image-dir', '-p', metavar='out_images', default=None, help='Save precision-recall curves to directory.')
    parser.add_argument('--verbose', '-v', action='store_true')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa['id']] = bool(qa['answers']['text'])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        return ARTICLES_REGEX.sub(' ', text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
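# Worked example: gold "the cat sat" vs. prediction "cat sat down". After
# normalization the gold tokens are ["cat", "sat"] ("the" is an article and is
# removed) and the prediction tokens are ["cat", "sat", "down"], so num_same = 2,
# precision = 2/3, recall = 2/2 = 1.0 and F1 = 2 * (2/3 * 1.0) / (2/3 + 1.0) = 0.8.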
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa['id']
                gold_answers = [t for t in qa['answers']['text'] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = ['']
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores.values()) / total),
                ('f1', 100.0 * sum(f1_scores.values()) / total),
                ('total', total),
            ] )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ('total', total),
            ] )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f'{prefix}_{k}'] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color='b', alpha=0.2, where='post')
    plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_exact.png'), title='Precision-Recall curve for Exact Match score', )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_f1.png'), title='Precision-Recall curve for F1 score', )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_oracle.png'), title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)', )
    merge_eval(main_eval, pr_exact, 'pr_exact')
    merge_eval(main_eval, pr_f1, 'pr_f1')
    merge_eval(main_eval, pr_oracle, 'pr_oracle')
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel('Model probability of no-answer')
    plt.ylabel('Proportion of dataset')
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval['best_exact'] = best_exact
    main_eval['best_exact_thresh'] = exact_thresh
    main_eval['best_f1'] = best_f1
    main_eval['best_f1_thresh'] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json['data']
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, 'HasAns')
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, 'NoAns')
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns')
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns')
    if OPTS.out_file:
        with open(OPTS.out_file, 'w') as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
| 270
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_focalnet'] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 132
| 0
|
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 515
|
def binary_insertion_sort(collection: list) -> list:
    """Sort a list with insertion sort, using binary search for the insert position.

    >>> binary_insertion_sort([5, 2, 4, 1])
    [1, 2, 4, 5]
    """
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
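# The binary search trims the comparisons to O(log i) per element, but shifting
# elements to make room is still O(i), so the worst-case running time stays O(n^2).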
if __name__ == "__main__":
UpperCamelCase = input('Enter numbers separated by a comma:\n').strip()
UpperCamelCase = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted))
| 515
| 1
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 31
|
'''simple docstring'''
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"""[^ a-z A-Z 0-9 \s]""", str_)]
def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])
def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ] )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"
def to_pascal_case(text: str) -> str:
    # name assumed from the behavior; it simply delegates to to_simple_case
    return to_simple_case(text)
def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")
def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__("doctest").testmod()
| 369
| 0
|
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    """Parse a boolean flag from an environment variable."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"""If set, {key} must be yes or no.""")
    return _value
_lowerCamelCase = parse_flag_from_env("""RUN_SLOW""", default=False)
_lowerCamelCase = parse_flag_from_env("""RUN_REMOTE""", default=False)
_lowerCamelCase = parse_flag_from_env("""RUN_LOCAL""", default=True)
_lowerCamelCase = parse_flag_from_env("""RUN_PACKAGED""", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")
# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
    reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """,
)
# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
    reason="""test requires apache-beam and a compatible dill version""",
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("""0.3.2"""),
    reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)
# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == """win32""",
    reason="""test should not be run on Windows""",
)
def require_faiss(test_case):
    """simple docstring"""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss" )(test_case)
    return test_case
def require_regex(test_case):
    """simple docstring"""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex" )(test_case)
    return test_case
def require_elasticsearch(test_case):
    """simple docstring"""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch" )(test_case)
    return test_case
def require_sqlalchemy(test_case):
    """simple docstring"""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy" )(test_case)
    return test_case
def require_torch(test_case):
    """simple docstring"""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch" )(test_case)
    return test_case
def require_tf(test_case):
    """simple docstring"""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow" )(test_case)
    return test_case
def require_jax(test_case):
    """simple docstring"""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX" )(test_case)
    return test_case
def require_pil(test_case):
    """simple docstring"""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow" )(test_case)
    return test_case
def require_transformers(test_case):
    """simple docstring"""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers" )(test_case)
    else:
        return test_case
def require_tiktoken(test_case):
    """simple docstring"""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken" )(test_case)
    else:
        return test_case
def require_spacy(test_case):
    """simple docstring"""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy" )(test_case)
    else:
        return test_case
def require_spacy_model(model):
    """simple docstring"""
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401
            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy" )(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model) )(test_case)
        else:
            return test_case
    return _require_spacy_model
def require_pyspark(test_case):
    """simple docstring"""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark" )(test_case)
    else:
        return test_case
def require_joblibspark(test_case):
    """simple docstring"""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark" )(test_case)
    else:
        return test_case
def slow(test_case):
    """simple docstring"""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow" )(test_case)
    return test_case
def local(test_case):
    """simple docstring"""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local" )(test_case)
    return test_case
def packaged(test_case):
    """simple docstring"""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged" )(test_case)
    return test_case
def remote(test_case):
    """simple docstring"""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote" )(test_case)
    return test_case
def for_all_test_methods(*decorators):
    """simple docstring"""
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test" ):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls , name , fn)
        return cls
    return decorate
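# Minimal usage sketch (assumes the decorator-factory name restored above;
# `my_skip_decorator` is a hypothetical decorator):
#
#   @for_all_test_methods(my_skip_decorator)
#   class MyTest(unittest.TestCase):
#       def test_something(self):
#           ...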
class RequestWouldHangIndefinitelyError(Exception):
    pass
class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS , timeout=1E-16):
    """simple docstring"""
    online_request = requests.Session().request
    def timeout_request(session , method , url , **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout" ) is None:
            raise RequestWouldHangIndefinitelyError(
                F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
        kwargs["timeout"] = timeout
        try:
            return online_request(session , method , invalid_url , **kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1" , F"""OfflineMock[{url}]""" ),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session , prepared_request , **kwargs):
        raise requests.ConnectionError("Offline mode is enabled." , request=prepared_request )
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send" , raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request" , timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE" , True ):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum." )
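# Minimal usage sketch for the offline simulator above (assumes the names
# `offline` and `OfflineSimulationMode` restored in this file):
#
#   with offline(OfflineSimulationMode.CONNECTION_FAILS):
#       try:
#           requests.get("https://huggingface.co")
#       except requests.ConnectionError:
#           pass  # every outgoing request now fails fast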
@contextmanager
def set_current_working_directory_to_temp_dir(*args , **kwargs):
    """simple docstring"""
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args , **kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            os.chdir(original_working_dir )
@contextmanager
def assert_arrow_memory_increases():
    """simple docstring"""
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase():
    """simple docstring"""
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
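# Minimal usage sketch (assumes the two context-manager names above):
#
#   with assert_arrow_memory_increases():
#       table = pa.table({"col": list(range(1_000_000))})  # allocates Arrow memory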
def is_rng_equal(rng1 , rng2):
    """simple docstring"""
    return deepcopy(rng1 ).integers(0 , 100 , 10 ).tolist() == deepcopy(rng2 ).integers(0 , 100 , 10 ).tolist()
def xfail_if_500_502_http_error(func):
    """simple docstring"""
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(func , *args , **kwargs):
        try:
            return func(*args , **kwargs )
        except HTTPError as err:
            if str(err ).startswith("500" ) or str(err ).startswith("502" ):
                pytest.xfail(str(err ) )
            raise err
    return decorator.decorator(_wrapper , func )
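# Minimal usage sketch (assumes the decorator name restored above; the endpoint
# is a hypothetical placeholder):
#
#   @xfail_if_500_502_http_error
#   def test_remote_endpoint():
#       response = requests.get("https://example.com/api")  # hypothetical endpoint
#       response.raise_for_status()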
class _RunOutput:
    def __init__(self , returncode , stdout , stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream , callback):
    """simple docstring"""
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False):
    """simple docstring"""
    if echo:
        print("\nRunning: " , " ".join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label=""):
        line = line.decode("utf-8" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda line: tee(line , out , sys.stdout , label="stdout:" ) ),
            _read_stream(p.stderr , lambda line: tee(line , err , sys.stderr , label="stderr:" ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async(cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True):
    """simple docstring"""
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = " ".join(cmd )
    if result.returncode > 0:
        stderr = "\n".join(result.stderr )
        raise RuntimeError(
            F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}""" )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
    return result
def pytest_xdist_worker_id():
    """simple docstring"""
    worker = os.environ.get("PYTEST_XDIST_WORKER" , "gw0" )
    worker = re.sub(r"^gw" , "" , worker , 0 , re.M )
    return int(worker )
def get_torch_dist_unique_port():
    """simple docstring"""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
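# Illustrative example (assumption: pytest-xdist sets PYTEST_XDIST_WORKER to
# ids like "gw3"): worker "gw3" makes pytest_xdist_worker_id() return 3, so
# get_torch_dist_unique_port() returns 29500 + 3 = 29503, giving each xdist
# worker its own torch.distributed master port.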
| 447
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = """https://openaipublic.azureedge.net/jukebox/models/"""
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key(key):
    """simple docstring"""
    if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.1.bias" , ".conv1d_1.bias" )
    elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.1.weight" , ".conv1d_1.weight" )
    elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.3.bias" , ".conv1d_2.bias" )
    elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.3.weight" , ".conv1d_2.weight" )
    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
    if "prime_prior" in key:
        key = key.replace("prime_prior" , "encoder" )
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb." , "." )
    if key.endswith("k" ):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k" , ".codebook" )
    if "y_emb." in key:
        return key.replace("y_emb." , "metadata_embedding." )
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb" , "embed_tokens" )
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
    if ".ln" in key:
        return key.replace(".ln" , ".layer_norm" )
    if "_ln" in key:
        return key.replace("_ln" , "_layer_norm" )
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj" , "encoder.proj_in" )
    if "prime_x_out" in key:
        return key.replace("prime_x_out" , "encoder.lm_head" )
    if "prior.x_out" in key:
        return key.replace("x_out" , "fc_proj_out" )
    if "x_emb" in key:
        return key.replace("x_emb" , "embed_tokens" )
    return key
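# A few illustrative input/output pairs, derived from the branches above (not
# part of the original file):
#   "vqvae.bottleneck.level_blocks.0.k" -> "vqvae.bottleneck.level_blocks.0.codebook"
#   "prior.x_out.weight"                -> "prior.fc_proj_out.weight"
#   "y_emb.weight"                      -> "metadata_embedding.weight"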
def fix_jukebox_keys(state_dict , model_state_dict , key_prefix , mapping):
    """simple docstring"""
    new_dict = {}
    import re
    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            re_new_key = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
            key = re_encoder_block_conv_in.sub(re_new_key , original_key )
        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
            resnet_block = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key , original_key )
        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
            key = re_encoder_block_proj_out.sub(re_new_key , original_key )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
            key = re_decoder_block_conv_out.sub(re_new_key , original_key )
        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
            resnet_block = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key , original_key )
        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
            key = re_decoder_block_proj_in.sub(re_new_key , original_key )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = F"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
            key = re_prior_cond_conv_out.sub(re_new_key , original_key )
        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = F"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
            resnet_block = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key , original_key )
        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
            key = re_prior_cond_proj_in.sub(re_new_key , original_key )
        # keep original key
        else:
            key = original_key
        key = replace_key(key )
        if F"""{key_prefix}.{key}""" not in model_state_dict or key is None:
            print(F"""failed converting {original_key} to {key}, does not match""" )
        # handle missmatched shape
        elif value.shape != model_state_dict[F"""{key_prefix}.{key}"""].shape:
            val = model_state_dict[F"""{key_prefix}.{key}"""]
            print(F"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None , pytorch_dump_folder_path=None):
    """simple docstring"""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
            r = requests.get(F"""{PREFIX}{file}""" , allow_redirects=True )
            os.makedirs(F"""{pytorch_dump_folder_path}/""" , exist_ok=True )
            open(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" , "wb" ).write(r.content )
    model_to_convert = MODEL_MAPPING[model_name.split("/" )[-1]]
    config = JukeboxConfig.from_pretrained(model_name )
    model = JukeboxModel(config )
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert ):
        old_dic = torch.load(F"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )["model"]
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b" ):
                new_dic[k.replace("b" , "bias" )] = old_dic[k]
            elif k.endswith(".w" ):
                new_dic[k.replace("w" , "weight" )] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks." , ".model." )] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = "vqvae" if i == 0 else F"""priors.{3 - i}"""
        new_dic = fix_jukebox_keys(new_dic , model.state_dict() , key_prefix , mapping )
        weight_dict.append(new_dic )
    vqvae_state_dict = weight_dict.pop(0 )
    model.vqvae.load_state_dict(vqvae_state_dict )
    for i in range(len(weight_dict ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    with open(F"""{pytorch_dump_folder_path}/mapping.json""" , "w" ) as txtfile:
        json.dump(mapping , txtfile )
    print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 447
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class TimesformerConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'timesformer'
    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
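# Minimal usage sketch (assumes the restored class name):
#   config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")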
| 394
|
import qiskit
def half_adder(bit0: int , bit1: int):
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0 )
    if bit1 == 1:
        qc_ha.x(1 )
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0 , 2 )
    qc_ha.cx(1 , 2 )
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0 , 1 , 3 )
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2 , 0 )  # extract XOR value
    qc_ha.measure(3 , 1 )  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f'''Half Adder Output Qubit Counts: {counts}''')
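# Expected counts for all four inputs, for reference (assumption: the count
# keys read as "<AND bit><XOR bit>", highest classical bit first):
#   half_adder(0, 0) -> {"00": 1000}
#   half_adder(0, 1) -> {"01": 1000}
#   half_adder(1, 0) -> {"01": 1000}
#   half_adder(1, 1) -> {"10": 1000}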
| 471
| 0
|
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name , num_frames):
    text_config = XCLIPTextConfig()
    # derive patch size from model name
    start_idx = model_name.find("""patch""" )
    patch_size = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
    vision_config = XCLIPVisionConfig(patch_size=patch_size , num_frames=num_frames )
    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072
    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336
    config = XCLIPConfig.from_text_vision_configs(text_config , vision_config )
    if "large" in model_name:
        config.projection_dim = 768
    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" )
    if name == "positional_embedding":
        name = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
    if "ln_1" in name:
        name = name.replace("""ln_1""" , """layer_norm1""" )
    if "ln_2" in name:
        name = name.replace("""ln_2""" , """layer_norm2""" )
    if "c_fc" in name:
        name = name.replace("""c_fc""" , """fc1""" )
    if "c_proj" in name:
        name = name.replace("""c_proj""" , """fc2""" )
    if name.startswith("""transformer.resblocks""" ):
        name = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" )
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("""attn.out_proj""" , """self_attn.out_proj""" )
    if "ln_final" in name:
        name = name.replace("""ln_final""" , """text_model.final_layer_norm""" )
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" )
    if name == "visual.positional_embedding":
        name = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" )
    if name.startswith("""visual.transformer.resblocks""" ):
        name = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" )
    if "visual.conv1" in name:
        name = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" )
    if "visual.ln_pre" in name:
        name = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" )
    if "visual.ln_post" in name:
        name = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" )
    if "visual.proj" in name:
        name = name.replace("""visual.proj""" , """visual_projection.weight""" )
    if "text_projection" in name:
        name = name.replace("""text_projection""" , """text_projection.weight""" )
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" )
    if "prompts_visual_ln" in name:
        name = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" )
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("""positional""" , """position""" )
    if name.startswith("""mit.resblocks""" ):
        name = name.replace("""mit.resblocks""" , """mit.encoder.layers""" )
    # prompts generator
    if name.startswith("""prompts_generator.norm""" ):
        name = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" )
    return name
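# A few illustrative renames, derived from the branches above (not part of the
# original file):
#   "token_embedding.weight"   -> "text_model.embeddings.token_embedding.weight"
#   "visual.ln_pre.weight"     -> "vision_model.pre_layernorm.weight"
#   "mit.positional_embedding" -> "mit.position_embedding"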
def convert_state_dict(orig_state_dict , config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn.in_proj" in key:
            key_split = key.split(""".""" )
            if key.startswith("""visual""" ):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
_lowercase = val[
:dim, :
]
_lowercase = val[
dim : dim * 2, :
]
_lowercase = val[
-dim:, :
]
else:
_lowercase = val[
:dim
]
_lowercase = val[
dim : dim * 2
]
_lowercase = val[
-dim:
]
else:
if "weight" in key:
_lowercase = val[
:dim, :
]
_lowercase = val[
dim : dim * 2, :
]
_lowercase = val[
-dim:, :
]
else:
_lowercase = val[:dim]
_lowercase = val[
dim : dim * 2
]
_lowercase = val[-dim:]
elif key.startswith("""mit""" ):
_lowercase = key_split[2]
_lowercase = config.vision_config.mit_hidden_size
if "weight" in key:
_lowercase = val[:dim, :]
_lowercase = val[dim : dim * 2, :]
_lowercase = val[-dim:, :]
else:
_lowercase = val[:dim]
_lowercase = val[dim : dim * 2]
_lowercase = val[-dim:]
        else:
            layer_num = key_split[2]
            dim = config.text_config.hidden_size
if "weight" in key:
_lowercase = val[:dim, :]
_lowercase = val[
dim : dim * 2, :
]
_lowercase = val[-dim:, :]
else:
_lowercase = val[:dim]
_lowercase = val[
dim : dim * 2
]
_lowercase = val[-dim:]
        else:
            new_key_name = rename_key(key )
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val
    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = """eating_spaghetti_8_frames.npy"""
    elif num_frames == 16:
        filename = """eating_spaghetti.npy"""
    elif num_frames == 32:
        filename = """eating_spaghetti_32_frames.npy"""
    file = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""" , filename=filename , repo_type="""dataset""" , )
    video = np.load(file )
    return list(video )
def convert_xclip_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32
    config = get_xclip_config(model_name , num_frames )
    model = XCLIPModel(config )
    model.eval()
    if "drive" in checkpoint_url:
        output = """pytorch_model.bin"""
        gdown.cached_download(checkpoint_url , output , quiet=False )
        state_dict = torch.load(output , map_location="""cpu""" )["""model"""]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url )["""model"""]
    state_dict = convert_state_dict(state_dict , config )
    model = XCLIPModel(config )
    missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()
    size = 336 if model_name == """xclip-large-patch14-16-frames""" else 224
    image_processor = VideoMAEImageProcessor(size=size )
    slow_tokenizer = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
    processor = XCLIPProcessor(image_processor=image_processor , tokenizer=fast_tokenizer )
    video = prepare_video(num_frames )
    inputs = processor(
        text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=video , return_tensors="""pt""" , padding=True )
    print("""Shape of pixel values:""" , inputs.pixel_values.shape )
    with torch.no_grad():
        outputs = model(**inputs )
    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1 )
    print("""Probs:""" , probs )
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]] )
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]] )
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]] )
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]] )
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]] )
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]] )
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
    else:
        raise ValueError(F"""Model name {model_name} not supported""" )
    assert torch.allclose(probs , expected_probs , atol=1e-3 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing model, processor and slow tokenizer files to the hub...""" )
        model.push_to_hub(model_name , organization="""nielsr""" )
        processor.push_to_hub(model_name , organization="""nielsr""" )
        slow_tokenizer.push_to_hub(model_name , organization="""nielsr""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 572
|
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
    max_seq_length: int = field(
        default=1_2_8 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    def __post_init__(self):
        """simple docstring"""
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = 'train'
    dev = 'dev'
    test = 'test'
class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        """simple docstring"""
        warnings.warn(
            """This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
            """library. You can have a look at this example script for pointers: """
            """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , FutureWarning , )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode , str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("""mode is not a valid split name""")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + """.lock"""
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start)
            else:
                logger.info(f"""Creating features from dataset file at {args.data_dir}""")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
                start = time.time()
                torch.save(self.features , cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""")
    def __len__(self):
        """simple docstring"""
        return len(self.features)
    def __getitem__(self , i) -> InputFeatures:
        """simple docstring"""
        return self.features[i]
    def get_labels(self):
        """simple docstring"""
        return self.label_list
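# Minimal usage sketch (assumes the restored class names; paths are placeholders):
#
#   from transformers import AutoTokenizer
#   args = GlueDataTrainingArguments(task_name="mrpc", data_dir="/path/to/mrpc")
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   dev_dataset = GlueDataset(args, tokenizer=tokenizer, mode="dev")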
| 572
| 1
|
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        '''simple docstring'''
        return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=True , initializer_range=self.initializer_range , )
    def create_and_check_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        '''simple docstring'''
        model = BioGptModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask ,):
        '''simple docstring'''
        model = BioGptForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_biogpt_model_attention_mask_past(self , config , input_ids , input_mask , head_mask , token_type_ids , *args):
        '''simple docstring'''
        model = BioGptModel(config=config )
        model.to(torch_device )
        model.eval()
        # create attention mask
        attn_mask = torch.ones(input_ids.shape , dtype=torch.long , device=torch_device )
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0
        # first forward pass
        output, past_key_values = model(input_ids , attention_mask=attn_mask ).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,) , half_seq_length ).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=torch_device )] , dim=1 ,)
        # get two different outputs
        output_from_no_past = model(next_input_ids , attention_mask=attn_mask )['''last_hidden_state''']
        output_from_past = model(next_tokens , past_key_values=past_key_values , attention_mask=attn_mask )['''last_hidden_state''']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1e-3 ) )
    def create_and_check_biogpt_model_past_large_inputs(self , config , input_ids , input_mask , head_mask , token_type_ids , *args):
        '''simple docstring'''
        model = BioGptModel(config=config ).to(torch_device ).eval()
        attention_mask = torch.ones(input_ids.shape , dtype=torch.long , device=torch_device )
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )['''last_hidden_state''']
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[
            '''last_hidden_state'''
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1e-3 ) )
    def create_and_check_forward_and_backwards(self , config , input_ids , input_mask , head_mask , token_type_ids , *args , gradient_checkpointing=False):
        '''simple docstring'''
        model = BioGptForCausalLM(config )
        model.to(torch_device )
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        result = model(input_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        result.loss.backward()
    def create_and_check_biogpt_weight_initialization(self , config , *args):
        '''simple docstring'''
        model = BioGptModel(config )
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
    def create_and_check_biogpt_for_token_classification(self , config , input_ids , input_mask , head_mask , token_type_ids , *args):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': BioGptModel,
            'text-classification': BioGptForSequenceClassification,
            'text-generation': BioGptForCausalLM,
            'token-classification': BioGptForTokenClassification,
            'zero-shot': BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = BioGptModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BioGptConfig , hidden_size=37 )
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*__lowerCamelCase ,gradient_checkpointing=__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*__lowerCamelCase )
@slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])
@slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_biogpt(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)

        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
| 387
|
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check if any name in `attributes` is used in one of the strings in `source_strings`."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
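# Note (added for clarity; not in the original script): the regex branch above
# exists because an attribute access can span several lines, e.g.
#
#     getattr(
#         self.config,
#         "use_cache",
#     )
#
# which the single-line substring checks `config.xxx` / `getattr(config, "xxx"`
# would not catch.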
def check_config_attributes_being_used(config_class):
    """Return the sorted list of `__init__` parameters of `config_class` that are never used in the modeling files."""
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Raise an error if some configuration classes contain unused attributes."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
| 387
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for the MaskFormer Swin backbone."""

    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 707
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
| 203
| 0
|
"""simple docstring"""
def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world'''))
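    # Worked example (added for illustration, not part of the original module):
    # for "hello" vs "world" only "l" survives the matching window, so m = 1
    # with no transpositions; jaro = (1/3) * (1/5 + 1/5 + 1/1) ≈ 0.4667, and
    # with no common prefix the Winkler bonus is zero, so the printed score is
    # 0.4666666666666666.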
| 103
|
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
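# Note (added for illustration; not part of the original script): timm stores
# attention projections as one fused `qkv` matrix of shape (3 * dim, dim). The
# slices above peel off query (rows [:dim]), key (rows [dim:2*dim]) and value
# (rows [-dim:]) so they match the separate q/k/v parameters of the HF model.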
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 573
| 0
|
'''simple docstring'''
def logical_left_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
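    # Illustrative sketch (added; not in the original module): a logical right
    # shift simply drops low bits, while an arithmetic right shift replicates
    # the sign bit on the left.
    print(logical_right_shift(8, 1))  # 0b100
    print(arithmetic_right_shift(-8, 1))  # 0b11100 (sign bit "1" replicated)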
| 126
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>

Assistant: """


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
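# Usage sketch (added for illustration; assumes network access to the Hub):
# download_prompt(None, agent_name="my-agent", mode="chat") fetches and returns
# the contents of chat_prompt_template.txt from the default repo above, while
# download_prompt("my custom prompt <<task>>", "my-agent") is returned verbatim
# because it contains whitespace and is therefore treated as a literal prompt.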
| 126
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 77
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"b0": {
"hidden_dim": 12_80,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 2_24,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 12_80,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 2_40,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 14_08,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 2_60,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 15_36,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 3_00,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 17_92,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 3_80,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 20_48,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 4_56,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 23_04,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 5_28,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 25_60,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 6_00,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 419
| 0
|
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.array:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
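    # Illustrative sketch (added; not in the original module): integrate
    # dy/dx = y with y(0) = 1 up to x = 1 using Heun's predictor-corrector step
    # above; the final value approximates e ≈ 2.718.
    approx = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(approx[-1])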
| 264
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
| 264
| 1
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """Output class for the scheduler's step function."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampling from Karras et al., tailored to variance-expanding models."""

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
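if __name__ == "__main__":
    # Minimal usage sketch (added for illustration; not part of the diffusers
    # module): the geometric schedule built in `set_timesteps` interpolates the
    # squared sigmas, running from ~sigma_min**2 at schedule[0] up to
    # sigma_max**2 at schedule[-1].
    scheduler = KarrasVeScheduler()
    scheduler.set_timesteps(50)
    print(scheduler.schedule[0].item(), scheduler.schedule[-1].item())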
| 282
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 282
| 1
|
'''simple docstring'''
from string import ascii_uppercase

dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            encrypted += dict2[x]
    return encrypted


def original_text(cipher_text: str, key_new: str) -> str:
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
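# Round-trip sketch for the cipher above (added as a minimal self-check; the
# expected key expansion was worked out by hand for the 17-character message):
def _demo_vigenere_roundtrip() -> str:
    key_new = generate_key("THE GERMAN ATTACK", "SECRET")
    assert key_new == "SECRETSECRETSECRE"  # key repeated to message length
    encrypted = cipher_text("THE GERMAN ATTACK", key_new)
    assert original_text(encrypted, key_new) == "THE GERMAN ATTACK"
    return encrypted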
'''simple docstring'''
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"
@property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]
        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)
        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()
        input = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**input)[0]
        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
[37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
# fmt: on
] )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
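# Construction sketch (added for illustration; uses the same tiny config as
# prepare_init_args_and_inputs_for_common above, so all shapes are dummy-sized):
def _demo_tiny_prior():
    model = PriorTransformer(
        num_attention_heads=2, attention_head_dim=4, num_layers=2,
        embedding_dim=8, num_embeddings=7, additional_embeddings=4,
    )
    out = model(
        hidden_states=torch.randn(4, 8), timestep=2,
        proj_embedding=torch.randn(4, 8),
        encoder_hidden_states=torch.randn(4, 7, 8),
    )[0]
    return out.shape  # expected torch.Size([4, 8])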
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
"""
_DESCRIPTION = """\
BLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""
_KWARGS_DESCRIPTION = """
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
'scores': List of scores.
Examples:
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> bleurt = datasets.load_metric(\"bleurt\")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results[\"scores\"]])
[1.03, 1.04]
"""
CHECKPOINT_URLS = {
"""bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""",
"""bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""",
"""bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""",
"""bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""",
"""bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""",
"""bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""",
"""BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""",
"""BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""",
"""BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""",
"""BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            self.config_name = "bleurt-base-128"

        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
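# Usage sketch (added; mirrors the docstring example above and needs the bleurt
# pip package plus network access to fetch a checkpoint, so it is left as a helper):
def _demo_bleurt_tiny():
    import datasets

    bleurt = datasets.load_metric("bleurt", "bleurt-tiny-128")
    return bleurt.compute(predictions=["hello there"], references=["hello there"])["scores"]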
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by"
            " adding to this function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
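# Wiring sketch (added; assumes a LightningModule whose hparams expose `output_dir`,
# as Seq2SeqLoggingCallback._write_logs requires):
def _demo_build_trainer(output_dir: str):
    callbacks = [
        get_checkpoint_callback(output_dir, "bleu"),
        get_early_stopping_callback("bleu", patience=3),
        Seq2SeqLoggingCallback(),
    ]
    return pl.Trainer(callbacks=callbacks, max_epochs=1)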
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
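# Quick check sketch (added): for [[2, 5], [1, 3]] the determinant is 1, so the
# inverse is just the swapped/negated matrix.
def _demo_inverse_2x2():
    inv = inverse_of_matrix([[2.0, 5.0], [1.0, 3.0]])
    assert inv == [[3.0, -5.0], [-1.0, 2.0]]
    return inv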
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast XLNet tokenizer, backed by HuggingFace tokenizers."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
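# Layout sketch (added): XLNet appends its special tokens rather than prepending them,
# so the two methods above produce, for a pair (A, B):
#   tokens:         A <sep> B <sep> <cls>
#   token_type_ids: 0 ...0  1 ...1    2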
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Rabin-Miller primality test with 5 random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Trial division against small primes, falling back to rabin_miller."""
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1_024) -> int:
    """Draw random keysize-bit integers until one passes the primality checks."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(('''Prime number:''', num))
print(('''is_prime_low_num:''', is_prime_low_num(num)))
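# Sanity sketch (added; the values were chosen so each code path above is exercised):
def _demo_prime_checks():
    assert is_prime_low_num(997)  # hits the low_primes table directly
    assert not is_prime_low_num(1_000_001)  # 101 * 9_901, caught by trial division
    assert is_prime_low_num(1_000_003)  # survives trial division, passes rabin_miller
    return generate_large_prime(64).bit_length()  # always 64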
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCAmelCase ( __a : Tuple ,__a : Dict ,__a : List[str] ,__a : Optional[Any] ,__a : Tuple ) -> Dict:
"""simple docstring"""
with open(__a ) as metadata_file:
_a : Optional[Any] = json.load(__a )
_a : List[Any] = LukeConfig(use_entity_aware_attention=__a ,**metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_a : Optional[Any] = torch.load(__a ,map_location='''cpu''' )['''module''']
# Load the entity vocab file
_a : Any = load_original_entity_vocab(__a )
# add an entry for [MASK2]
_a : Union[str, Any] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_a : Dict = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_a : Optional[int] = AddedToken('''<ent>''' ,lstrip=__a ,rstrip=__a )
_a : Tuple = AddedToken('''<ent2>''' ,lstrip=__a ,rstrip=__a )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(__a )
with open(os.path.join(__a ,'''tokenizer_config.json''' ) ,'''r''' ) as f:
_a : List[str] = json.load(__a )
_a : Tuple = '''MLukeTokenizer'''
with open(os.path.join(__a ,'''tokenizer_config.json''' ) ,'''w''' ) as f:
json.dump(__a ,__a )
with open(os.path.join(__a ,MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) ,'''w''' ) as f:
json.dump(__a ,__a )
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a )
# Initialize the embeddings of the special tokens
_a : str = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
_a : Tuple = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
_a : Any = state_dict['''embeddings.word_embeddings.weight''']
_a : Optional[int] = word_emb[ent_init_index].unsqueeze(0 )
_a : Any = word_emb[enta_init_index].unsqueeze(0 )
_a : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_a : Tuple = state_dict[bias_name]
_a : Optional[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
_a : Optional[int] = decoder_bias[enta_init_index].unsqueeze(0 )
_a : Dict = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_a : Tuple = F"""encoder.layer.{layer_index}.attention.self."""
_a : List[Any] = state_dict[prefix + matrix_name]
_a : Dict = state_dict[prefix + matrix_name]
_a : List[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_a : Union[str, Any] = state_dict['''entity_embeddings.entity_embeddings.weight''']
_a : Optional[int] = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
_a : Any = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_a : int = state_dict['''entity_predictions.bias''']
_a : int = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
_a : Optional[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
_a : Optional[int] = LukeForMaskedLM(config=__a ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
_a : int = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
_a : Optional[int] = state_dict[key]
else:
_a : Tuple = state_dict[key]
_a , _a : int = model.load_state_dict(__a ,strict=__a )
if set(__a ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(__a ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a ,task='''entity_classification''' )
_a : int = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
_a : List[Any] = (0, 9)
_a : Tuple = tokenizer(__a ,entity_spans=[span] ,return_tensors='''pt''' )
_a : int = model(**__a )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_a : List[str] = torch.Size((1, 33, 768) )
_a : Union[str, Any] = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__a ,atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_a : str = torch.Size((1, 1, 768) )
_a : List[Any] = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,__a ,atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a )
_a : Dict = '''Tokyo is the capital of <mask>.'''
_a : List[str] = (24, 30)
_a : Optional[int] = tokenizer(__a ,entity_spans=[span] ,return_tensors='''pt''' )
_a : Optional[Any] = model(**__a )
_a : Any = encoding['''input_ids'''][0].tolist()
_a : Optional[Any] = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
_a : Any = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__a )
_a : Any = outputs.entity_logits[0][0].argmax().item()
_a : Optional[Any] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(__a ) )
model.save_pretrained(__a )
def __UpperCAmelCase ( __a : List[Any] ) -> int:
"""simple docstring"""
_a : Union[str, Any] = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
_a : int = [json.loads(__a ) for line in open(__a )]
_a : List[Any] = {}
for entry in data:
_a : int = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_a : List[Any] = entity_id
break
_a : Dict = F"""{language}:{entity_name}"""
_a : int = entity_id
return new_mapping
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
a__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
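# Invocation sketch (added; the script name and every path below are placeholders
# for files shipped with the original mLUKE release, not real defaults):
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path mluke/pytorch_model.bin \
#       --metadata_path mluke/metadata.json \
#       --entity_vocab_path mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base \
#       --model_size base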
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS"""
        return 1

    def _special_token_mask(self, seq) -> List[int]:
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
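# Id-layout sketch (added): ids 0/1 are reserved for <pad>/</s>, ids 2-104 cover the
# pretraining mask/unk placeholders, and sentencepiece pieces start at `offset` (103):
#   tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#   tok.pad_token_id  # 0
#   tok.eos_token_id  # 1
#   tok._convert_id_to_token(2), tok._convert_id_to_token(3)  # "<mask_1>", "<mask_2>"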
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1_000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1_000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1_000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
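# End-to-end sketch (added; condensed from the slow tests above, so it needs a GPU
# and the fusing/stable-unclip-2-1-l checkpoint; step counts are kept tiny on purpose):
def _demo_stable_unclip(prompt="anime turtle"):
    pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
    pipe.enable_attention_slicing()
    pipe.enable_sequential_cpu_offload()
    return pipe(prompt, prior_num_inference_steps=2, num_inference_steps=2, output_type="np").images[0]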
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
SCREAMING_SNAKE_CASE_ = yaml.safe_load(
'\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'
)
SCREAMING_SNAKE_CASE_ = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
SCREAMING_SNAKE_CASE_ = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
SCREAMING_SNAKE_CASE_ = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
SCREAMING_SNAKE_CASE_ = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
SCREAMING_SNAKE_CASE_ = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
SCREAMING_SNAKE_CASE_ = (
'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)
SCREAMING_SNAKE_CASE_ = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
SCREAMING_SNAKE_CASE_ = (
'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)
SCREAMING_SNAKE_CASE_ = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
SCREAMING_SNAKE_CASE_ = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'
SCREAMING_SNAKE_CASE_ = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
SCREAMING_SNAKE_CASE_ = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'
SCREAMING_SNAKE_CASE_ = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
SCREAMING_SNAKE_CASE_ = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'
SCREAMING_SNAKE_CASE_ = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
SCREAMING_SNAKE_CASE_ = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'
SCREAMING_SNAKE_CASE_ = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
SCREAMING_SNAKE_CASE_ = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'
SCREAMING_SNAKE_CASE_ = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
SCREAMING_SNAKE_CASE_ = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'
SCREAMING_SNAKE_CASE_ = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
SCREAMING_SNAKE_CASE_ = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'
SCREAMING_SNAKE_CASE_ = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
SCREAMING_SNAKE_CASE_ = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'
SCREAMING_SNAKE_CASE_ = ''
SCREAMING_SNAKE_CASE_ = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'
SCREAMING_SNAKE_CASE_ = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."


@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)


@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_validation_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
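# Illustrative sketch (not part of the original test module): the README_* fixtures
# these tests parametrize over live earlier in this file. A hypothetical minimal card
# that the parser accepts would look roughly like this: a YAML front-matter block
# followed by one `# Dataset Card for ...` top-level heading with text under each
# subsection heading.
#
# README_EXAMPLE = """\
# ---
# language:
# - zh
# - en
# ---
#
# # Dataset Card for My Dataset
# ## Table of Contents
# Some text here.
# ## Dataset Description
# Some text here.
# """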
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
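# Hypothetical usage sketch (not part of the original module): a concrete test case
# mixes ToolTesterMixin into a unittest.TestCase and provides `self.tool` in setUp.
# The tool name below is assumed for illustration only.
#
# from transformers import load_tool
#
# class TranslationToolTester(ToolTesterMixin, unittest.TestCase):
#     def setUp(self):
#         self.tool = load_tool("translation")  # assumed tool identifier
#         self.tool.setup()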
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
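# Illustrative sketch (not part of the original module): how the key tuples above can
# be used. Since e_2 = (e_1^d)^(-1) mod p, encrypting a block m as
# (c_1, c_2) = (e_1^k mod p, m * e_2^k mod p) lets the private exponent d recover m,
# because c_2 * c_1^d = m * e_1^(-dk) * e_1^(kd) = m (mod p). The helper names below
# are hypothetical.
#
# def encrypt_block(m: int, public_key: tuple) -> tuple[int, int]:
#     _, e_1, e_2, p = public_key
#     k = random.randrange(2, p)  # fresh ephemeral key per block
#     return pow(e_1, k, p), (m * pow(e_2, k, p)) % p
#
# def decrypt_block(c_1: int, c_2: int, private_key: tuple, p: int) -> int:
#     _, d = private_key
#     return (c_2 * pow(c_1, d, p)) % p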
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
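# Usage sketch (illustrative, not part of the original module): each auto class
# resolves a checkpoint's config type to the matching Flax model class through the
# mappings above. The checkpoint name is only an example.
#
# from transformers import AutoTokenizer, FlaxAutoModelForSequenceClassification
#
# tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
# model = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-cased")
# batch = tokenizer("Hello, world!", return_tensors="np")
# logits = model(**batch).logits  # shape (1, num_labels); the head is freshly initialized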
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # Computes the height and width the image processor is expected to produce,
        # resizing the shorter edge to size["shortest_edge"] while keeping aspect ratio.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
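    # Worked example (illustrative): with the 640x480 COCO image used by the slow tests
    # below and the checkpoint's shortest_edge=800, w > h gives expected_height = 800
    # and expected_width = int(800 * 640 / 480) = 1066, matching torch.Size([1, 3, 800, 1066]).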
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_torch_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)
    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_torch(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
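    # Worked shape example (illustrative): the SD VAE downsamples each spatial dimension
    # by 8 and uses 4 latent channels, so a (4, 3, 512, 512) image batch encodes to
    # latents of shape [4, 4, 512 // 8, 512 // 8] == [4, 4, 64, 64], the same layout as
    # the (3, 4, 64, 64) latents fed to model.decode(...) in the decode tests above.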
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)


DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
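# Usage sketch (illustrative, not part of the original module):
#
# from datasets import Dataset
#
# d1 = Dataset.from_dict({"a": [0, 1, 2]})
# d2 = Dataset.from_dict({"a": [10, 11, 12]})
# mixed = interleave_datasets(
#     [d1, d2], probabilities=[0.8, 0.2], seed=42, stopping_strategy="all_exhausted"
# )  # samples rows from d1/d2 at the given rates until both datasets are exhausted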
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
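# Usage sketch (illustrative, not part of the original module):
#
# from transformers import RoCBertConfig, RoCBertModel
#
# config = RoCBertConfig(num_hidden_layers=6)  # smaller variant for experimentation
# model = RoCBertModel(config)                 # randomly initialized weights
# print(config.enable_pronunciation, config.enable_shape)  # True True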
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
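# Usage sketch (illustrative; the class is deprecated, as the warning above notes).
# The data_dir path below is an assumption for the example:
#
# from transformers import AutoTokenizer
#
# tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
# args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128)
# train_dataset = GlueDataset(args, tokenizer=tokenizer, mode="train")
# print(len(train_dataset), train_dataset.get_labels())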
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()

results = {}
# fmt: off
lowercase__ :Optional[int] = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
lowercase__ :Optional[Any] = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
lowercase__ :Optional[Any] = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
lowercase__ :List[Any] = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
lowercase__ :List[Any] = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
lowercase__ :Optional[int] = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
lowercase__ :Optional[Any] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
lowercase__ :List[str] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
lowercase__ :str = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
lowercase__ :Union[str, Any] = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
lowercase__ :List[Any] = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
lowercase__ :Optional[Any] = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
lowercase__ :Optional[int] = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
lowercase__ :int = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
lowercase__ :List[str] = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
lowercase__ :Union[str, Any] = api.list_models(filter='diffusers')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
lowercase__ :List[Any] = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith('CompVis'):
lowercase__ :str = UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet')
else:
lowercase__ :List[str] = UNetaDModel.from_pretrained(local_checkpoint)
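# Seed all RNGs so the sampled noise is identical for every checkpoint.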
torch.manual_seed(0)
random.seed(0)
lowercase__ :Optional[int] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
lowercase__ :List[Any] = torch.tensor([1_0] * noise.shape[0])
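# Single denoising forward pass; no gradients are needed for this check.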
with torch.no_grad():
lowercase__ :int = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :3_0], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1E-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 522
| 0
|
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def __a ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
# Initialise PyTorch model
SCREAMING_SNAKE_CASE : List[str] = MobileBertConfig.from_json_file(__lowerCAmelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
SCREAMING_SNAKE_CASE : Optional[Any] = MobileBertForPreTraining(__lowerCAmelCase )
# Load weights from tf checkpoint
SCREAMING_SNAKE_CASE : List[Any] = load_tf_weights_in_mobilebert(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __lowerCAmelCase )
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowerCamelCase : List[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 308
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
_lowerCamelCase : Dict = logging.get_logger(__name__)
@dataclass
class lowercase :
'''simple docstring'''
UpperCAmelCase : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())})
UpperCAmelCase : str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
UpperCAmelCase : int = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
UpperCAmelCase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'help': 'Overwrite the cached training and evaluation sets'})
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.task_name.lower()
class lowercase ( SCREAMING_SNAKE_CASE_):
'''simple docstring'''
UpperCAmelCase : List[Any] = 'train'
UpperCAmelCase : Optional[Any] = 'dev'
UpperCAmelCase : Optional[int] = 'test'
class lowercase ( SCREAMING_SNAKE_CASE_):
'''simple docstring'''
UpperCAmelCase : GlueDataTrainingArguments
UpperCAmelCase : str
UpperCAmelCase : List[InputFeatures]
def __init__( self : Union[str, Any] , snake_case : GlueDataTrainingArguments , snake_case : PreTrainedTokenizerBase , snake_case : Optional[int] = None , snake_case : Union[str, Split] = Split.train , snake_case : Optional[str] = None , ):
'''simple docstring'''
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , snake_case , )
SCREAMING_SNAKE_CASE : Tuple = args
SCREAMING_SNAKE_CASE : int = glue_processors[args.task_name]()
SCREAMING_SNAKE_CASE : str = glue_output_modes[args.task_name]
if isinstance(snake_case , snake_case ):
try:
SCREAMING_SNAKE_CASE : Any = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
# Load data features from cache or dataset file
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , )
SCREAMING_SNAKE_CASE : Any = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE : List[Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE : Union[str, Any] = cached_features_file + '.lock'
with FileLock(snake_case ):
if os.path.exists(snake_case ) and not args.overwrite_cache:
SCREAMING_SNAKE_CASE : Optional[int] = time.time()
SCREAMING_SNAKE_CASE : int = torch.load(snake_case )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
else:
logger.info(f'''Creating features from dataset file at {args.data_dir}''' )
if mode == Split.dev:
SCREAMING_SNAKE_CASE : str = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
SCREAMING_SNAKE_CASE : Dict = self.processor.get_test_examples(args.data_dir )
else:
SCREAMING_SNAKE_CASE : str = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = examples[:limit_length]
SCREAMING_SNAKE_CASE : Optional[Any] = glue_convert_examples_to_features(
snake_case , snake_case , max_length=args.max_seq_length , label_list=snake_case , output_mode=self.output_mode , )
SCREAMING_SNAKE_CASE : Tuple = time.time()
torch.save(self.features , snake_case )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self : int ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : Dict , i : int ):
'''simple docstring'''
return self.features[i]
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return self.label_list
| 308
| 1
|
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_lowerCamelCase = argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False)
parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""")
parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""")
_lowerCamelCase = parser.parse_args()
_lowerCamelCase = """cpu"""
_lowerCamelCase = """a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"""
_lowerCamelCase = """path-to-your-trained-model"""
_lowerCamelCase = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_lowerCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowerCamelCase = pipe.to(device)
# to channels last
_lowerCamelCase = pipe.unet.to(memory_format=torch.channels_last)
_lowerCamelCase = pipe.vae.to(memory_format=torch.channels_last)
_lowerCamelCase = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_lowerCamelCase = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
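# Dummy UNet inputs (latents, timestep, text embeddings) used to trace the model for IPEX.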
_lowerCamelCase = torch.randn(2, 4, 64, 64)
_lowerCamelCase = torch.rand(1) * 999
_lowerCamelCase = torch.randn(2, 77, 768)
_lowerCamelCase = (sample, timestep, encoder_hidden_states)
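# Tracing with a sample input can fail on some IPEX versions, so fall back to plain optimization.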
try:
_lowerCamelCase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_lowerCamelCase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_lowerCamelCase = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_lowerCamelCase = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_lowerCamelCase = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_lowerCamelCase = 666
_lowerCamelCase = torch.Generator(device).manual_seed(seed)
_lowerCamelCase = {"""generator""": generator}
if args.steps is not None:
_lowerCamelCase = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_lowerCamelCase = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("""generated.png""")
| 71
|
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class A ( UpperCamelCase_ , unittest.TestCase ):
UpperCamelCase__ : Dict =DebertaVaTokenizer
UpperCamelCase__ : Union[str, Any] =DebertaVaTokenizerFast
UpperCamelCase__ : Optional[int] =True
UpperCamelCase__ : Tuple =True
def lowerCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCamelCase : Optional[Any] =DebertaVaTokenizer(lowercase_ , unk_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase ( self : Optional[Any] , lowercase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : Dict ='this is a test'
_lowerCamelCase : List[Any] ='this is a test'
return input_text, output_text
def lowerCamelCase ( self : int ) -> str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] ='<pad>'
_lowerCamelCase : Union[str, Any] =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def lowerCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_lowerCamelCase : Optional[Any] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '[PAD]' )
self.assertEqual(len(lowercase_ ) , 3_0001 )
def lowerCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def lowerCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase : List[str] =' \tHeLLo!how \n Are yoU? '
_lowerCamelCase : List[Any] =['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
_lowerCamelCase : Any =DebertaVaTokenizer(lowercase_ , do_lower_case=lowercase_ )
_lowerCamelCase : Dict =tokenizer.convert_ids_to_tokens(tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : Optional[Any] =DebertaVaTokenizerFast(lowercase_ , do_lower_case=lowercase_ )
_lowerCamelCase : Optional[Any] =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) )
self.assertListEqual(lowercase_ , lowercase_ )
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def lowerCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def lowerCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
def lowerCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
_lowerCamelCase : Optional[Any] ='I was born in 92000, and this is falsé.'
_lowerCamelCase : int =['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
_lowerCamelCase : int =DebertaVaTokenizer(lowercase_ , split_by_punct=lowercase_ )
_lowerCamelCase : List[Any] =tokenizer.convert_ids_to_tokens(tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : Union[str, Any] =DebertaVaTokenizerFast(lowercase_ , split_by_punct=lowercase_ )
_lowerCamelCase : Tuple =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) )
self.assertListEqual(lowercase_ , lowercase_ )
def lowerCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] ='I was born in 92000, and this is falsé.'
_lowerCamelCase : Union[str, Any] =['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
_lowerCamelCase : Optional[int] =DebertaVaTokenizer(lowercase_ , do_lower_case=lowercase_ , split_by_punct=lowercase_ )
_lowerCamelCase : Any =tokenizer.convert_ids_to_tokens(tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : List[Any] =DebertaVaTokenizerFast(lowercase_ , do_lower_case=lowercase_ , split_by_punct=lowercase_ )
_lowerCamelCase : Optional[int] =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) )
self.assertListEqual(lowercase_ , lowercase_ )
def lowerCamelCase ( self : Any ) -> Any:
"""simple docstring"""
_lowerCamelCase : int ='I was born in 92000, and this is falsé.'
_lowerCamelCase : Union[str, Any] =['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
_lowerCamelCase : Any =DebertaVaTokenizer(lowercase_ , do_lower_case=lowercase_ , split_by_punct=lowercase_ )
_lowerCamelCase : Tuple =tokenizer.convert_ids_to_tokens(tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : Tuple =DebertaVaTokenizerFast(lowercase_ , do_lower_case=lowercase_ , split_by_punct=lowercase_ )
_lowerCamelCase : str =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) )
self.assertListEqual(lowercase_ , lowercase_ )
def lowerCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_lowerCamelCase : Any ='I was born in 92000, and this is falsé.'
_lowerCamelCase : Optional[int] =['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
_lowerCamelCase : Optional[Any] =DebertaVaTokenizer(lowercase_ , do_lower_case=lowercase_ , split_by_punct=lowercase_ )
_lowerCamelCase : Dict =tokenizer.convert_ids_to_tokens(tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : List[str] =DebertaVaTokenizerFast(lowercase_ , do_lower_case=lowercase_ , split_by_punct=lowercase_ )
_lowerCamelCase : Optional[Any] =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) )
self.assertListEqual(lowercase_ , lowercase_ )
def lowerCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : int =' \tHeLLo!how \n Are yoU? '
_lowerCamelCase : Optional[Any] =['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
_lowerCamelCase : Optional[int] =DebertaVaTokenizer(lowercase_ , do_lower_case=lowercase_ , split_by_punct=lowercase_ )
_lowerCamelCase : Any =tokenizer.convert_ids_to_tokens(tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : Optional[Any] =DebertaVaTokenizerFast(lowercase_ , do_lower_case=lowercase_ , split_by_punct=lowercase_ )
_lowerCamelCase : Any =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) )
self.assertListEqual(lowercase_ , lowercase_ )
def lowerCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
_lowerCamelCase : List[Any] =self.get_tokenizer()
_lowerCamelCase : Dict =self.get_rust_tokenizer()
_lowerCamelCase : str ='I was born in 92000, and this is falsé.'
_lowerCamelCase : Optional[Any] =tokenizer.convert_ids_to_tokens(tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) )
_lowerCamelCase : Union[str, Any] =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : List[str] =tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
_lowerCamelCase : Optional[int] =rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : Any =self.get_rust_tokenizer()
_lowerCamelCase : List[Any] =tokenizer.encode(lowercase_ )
_lowerCamelCase : str =rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def lowerCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] ='This is a test'
_lowerCamelCase : Optional[int] =[13, 1, 4398, 25, 21, 1289]
_lowerCamelCase : List[Any] =['▁', 'T', 'his', '▁is', '▁a', '▁test']
_lowerCamelCase : Optional[int] =['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
_lowerCamelCase : Optional[Any] =DebertaVaTokenizer(lowercase_ , keep_accents=lowercase_ )
_lowerCamelCase : Any =DebertaVaTokenizerFast(lowercase_ , keep_accents=lowercase_ )
_lowerCamelCase : Any =tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : Tuple =tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : Union[str, Any] =tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : Dict =rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : Optional[int] =rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : Any =rust_tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
# fmt: off
_lowerCamelCase : Optional[Any] ='I was born in 92000, and this is falsé.'
_lowerCamelCase : str =[13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
_lowerCamelCase : Optional[int] =['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
_lowerCamelCase : Dict =['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
_lowerCamelCase : Any =tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : List[Any] =tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : Optional[Any] =tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : Union[str, Any] =rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : List[str] =rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : Union[str, Any] =rust_tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def lowerCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_lowerCamelCase : int =DebertaVaTokenizer(lowercase_ )
_lowerCamelCase : str =tokenizer.encode('sequence builders' )
_lowerCamelCase : List[Any] =tokenizer.encode('multi-sequence build' )
_lowerCamelCase : List[Any] =tokenizer.build_inputs_with_special_tokens(lowercase_ )
_lowerCamelCase : Dict =tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , lowercase_ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , lowercase_ , )
@slow
def lowerCamelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Dict ={'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='microsoft/deberta-v2-xlarge' , revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' , )
| 464
| 0
|
import unittest
from knapsack import knapsack as k
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Tuple ):
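# A zero-capacity knapsack must always yield zero profit.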
_UpperCamelCase = 0
_UpperCamelCase = [0]
_UpperCamelCase = [0]
_UpperCamelCase = len(_lowercase )
self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 0 )
_UpperCamelCase = [60]
_UpperCamelCase = [10]
_UpperCamelCase = len(_lowercase )
self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 0 )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = 3
_UpperCamelCase = [1, 2, 3]
_UpperCamelCase = [3, 2, 1]
_UpperCamelCase = len(_lowercase )
self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 5 )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = 50
_UpperCamelCase = [60, 100, 120]
_UpperCamelCase = [10, 20, 30]
_UpperCamelCase = len(_lowercase )
self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 220 )
if __name__ == "__main__":
unittest.main()
| 719
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "gpt_neox"
def __init__( self : Union[str, Any] , _A : Union[str, Any]=5_0432 , _A : List[Any]=6144 , _A : int=44 , _A : int=64 , _A : Optional[Any]=2_4576 , _A : Any="gelu" , _A : Tuple=0.25 , _A : Union[str, Any]=1_0000 , _A : Tuple=0.0 , _A : Any=0.0 , _A : int=0.1 , _A : List[str]=2048 , _A : Dict=0.02 , _A : Optional[Any]=1e-5 , _A : Tuple=True , _A : List[Any]=0 , _A : Optional[int]=2 , _A : Optional[int]=False , _A : List[Any]=True , _A : Any=None , **_A : Any , ):
super().__init__(bos_token_id=_A , eos_token_id=_A , **_A )
_UpperCamelCase = vocab_size
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = rotary_pct
_UpperCamelCase = rotary_emb_base
_UpperCamelCase = attention_dropout
_UpperCamelCase = hidden_dropout
_UpperCamelCase = classifier_dropout
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = use_cache
_UpperCamelCase = tie_word_embeddings
_UpperCamelCase = use_parallel_residual
_UpperCamelCase = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
'''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
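# Validate the optional rope_scaling dict: {"type": "linear" | "dynamic", "factor": float > 1.0}.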
def UpperCamelCase_ ( self : str ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _A ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
F"""got {self.rope_scaling}""" )
_UpperCamelCase = self.rope_scaling.get('''type''' , _A )
_UpperCamelCase = self.rope_scaling.get('''factor''' , _A )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(_A , _A ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 71
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ : Union[str, Any] = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[Any] = ["""ChineseCLIPFeatureExtractor"""]
lowerCamelCase__ : Optional[int] = ["""ChineseCLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : List[str] = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
lowerCamelCase__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 12
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[Any] = logging.get_logger(__name__)
a : List[Any] = {
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class __UpperCamelCase ( a__ ):
lowerCamelCase : Tuple ="""unispeech-sat"""
def __init__( self , lowerCAmelCase__=32 , lowerCAmelCase__=768 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=3072 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-5 , lowerCAmelCase__="group" , lowerCAmelCase__="gelu" , lowerCAmelCase__=(512, 512, 512, 512, 512, 512, 512) , lowerCAmelCase__=(5, 2, 2, 2, 2, 2, 2) , lowerCAmelCase__=(10, 3, 3, 3, 3, 2, 2) , lowerCAmelCase__=False , lowerCAmelCase__=128 , lowerCAmelCase__=16 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0.05 , lowerCAmelCase__=10 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0 , lowerCAmelCase__=10 , lowerCAmelCase__=0 , lowerCAmelCase__=320 , lowerCAmelCase__=2 , lowerCAmelCase__=0.1 , lowerCAmelCase__=100 , lowerCAmelCase__=256 , lowerCAmelCase__=256 , lowerCAmelCase__=0.1 , lowerCAmelCase__="mean" , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=256 , lowerCAmelCase__=(512, 512, 512, 512, 1500) , lowerCAmelCase__=(5, 3, 3, 1, 1) , lowerCAmelCase__=(1, 2, 3, 1, 1) , lowerCAmelCase__=512 , lowerCAmelCase__=0 , lowerCAmelCase__=1 , lowerCAmelCase__=2 , lowerCAmelCase__=504 , **lowerCAmelCase__ , ) -> List[str]:
super().__init__(**lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ )
a : Any = hidden_size
a : Tuple = feat_extract_norm
a : Tuple = feat_extract_activation
a : Dict = list(lowerCAmelCase__ )
a : int = list(lowerCAmelCase__ )
a : Optional[Any] = list(lowerCAmelCase__ )
a : int = conv_bias
a : str = num_conv_pos_embeddings
a : Dict = num_conv_pos_embedding_groups
a : Optional[int] = len(self.conv_dim )
a : int = num_hidden_layers
a : Any = intermediate_size
a : Any = hidden_act
a : List[Any] = num_attention_heads
a : Any = hidden_dropout
a : Union[str, Any] = attention_dropout
a : Tuple = activation_dropout
a : Dict = feat_proj_dropout
a : Optional[Any] = final_dropout
a : Union[str, Any] = layerdrop
a : str = layer_norm_eps
a : Optional[int] = initializer_range
a : Optional[int] = vocab_size
a : str = num_clusters
a : Any = do_stable_layer_norm
a : str = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a : List[Any] = apply_spec_augment
a : int = mask_time_prob
a : Optional[int] = mask_time_length
a : Dict = mask_time_min_masks
a : Optional[int] = mask_feature_prob
a : List[str] = mask_feature_length
a : Dict = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
a : List[str] = num_codevectors_per_group
a : List[Any] = num_codevector_groups
a : Tuple = contrastive_logits_temperature
a : int = feat_quantizer_dropout
a : Optional[Any] = num_negatives
a : Optional[int] = codevector_dim
a : Tuple = proj_codevector_dim
a : Optional[int] = diversity_loss_weight
# ctc loss
a : Dict = ctc_loss_reduction
a : Optional[Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
a : Dict = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
a : Union[str, Any] = list(lowerCAmelCase__ )
a : List[str] = list(lowerCAmelCase__ )
a : Tuple = list(lowerCAmelCase__ )
a : Optional[int] = xvector_output_dim
@property
def __a ( self ) -> Any:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 633
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''google/vit-base-patch16-224''': '''https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class __magic_name__ ( __UpperCAmelCase ):
__A : Optional[int] = "vit"
def __init__( self : Tuple , snake_case__ : int=7_6_8 , snake_case__ : Optional[int]=1_2 , snake_case__ : str=1_2 , snake_case__ : Tuple=3_0_7_2 , snake_case__ : Optional[int]="gelu" , snake_case__ : Optional[int]=0.0 , snake_case__ : str=0.0 , snake_case__ : str=0.02 , snake_case__ : Union[str, Any]=1e-1_2 , snake_case__ : int=2_2_4 , snake_case__ : Optional[int]=1_6 , snake_case__ : Optional[int]=3 , snake_case__ : List[str]=True , snake_case__ : Tuple=1_6 , **snake_case__ : str , ):
'''simple docstring'''
super().__init__(**snake_case__ )
lowercase :Tuple = hidden_size
lowercase :Optional[int] = num_hidden_layers
lowercase :Optional[int] = num_attention_heads
lowercase :Any = intermediate_size
lowercase :Union[str, Any] = hidden_act
lowercase :Any = hidden_dropout_prob
lowercase :Dict = attention_probs_dropout_prob
lowercase :Tuple = initializer_range
lowercase :int = layer_norm_eps
lowercase :int = image_size
lowercase :List[Any] = patch_size
lowercase :str = num_channels
lowercase :Optional[int] = qkv_bias
lowercase :Optional[Any] = encoder_stride
class __magic_name__ ( __UpperCAmelCase ):
__A : List[str] = version.parse("1.11" )
@property
def __snake_case ( self : Any ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
return 1e-4
| 700
|
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__A : List[str] = VQModel
__A : Any = "sample"
@property
def __snake_case ( self : int , snake_case__ : int=(3_2, 3_2) ):
'''simple docstring'''
lowercase :Optional[int] = 4
lowercase :Tuple = 3
lowercase :List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case__ )
return {"sample": image}
@property
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
return (3, 3_2, 3_2)
@property
def __snake_case ( self : List[str] ):
'''simple docstring'''
return (3, 3_2, 3_2)
def __snake_case ( self : Dict ):
'''simple docstring'''
lowercase :Optional[int] = {
'''block_out_channels''': [3_2, 6_4],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 3,
}
lowercase :Dict = self.dummy_input
return init_dict, inputs_dict
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
pass
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
pass
def __snake_case ( self : Dict ):
'''simple docstring'''
lowercase , lowercase :Optional[int] = VQModel.from_pretrained('''fusing/vqgan-dummy''' , output_loading_info=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(snake_case__ )
lowercase :List[str] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
lowercase :Optional[int] = VQModel.from_pretrained('''fusing/vqgan-dummy''' )
model.to(snake_case__ ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
lowercase :List[Any] = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
lowercase :int = image.to(snake_case__ )
with torch.no_grad():
lowercase :str = model(snake_case__ ).sample
lowercase :List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowercase :Tuple = torch.tensor([-0.01_53, -0.40_44, -0.18_80, -0.51_61, -0.24_18, -0.40_72, -0.16_12, -0.06_33, -0.01_43] )
# fmt: on
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
| 475
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : List[str] = logging.get_logger(__name__)
__A : List[str] = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Any = "roberta-prelayernorm"
def __init__( self : int , UpperCAmelCase_ : int=50265 , UpperCAmelCase_ : Optional[Any]=768 , UpperCAmelCase_ : List[Any]=12 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : List[str]=3072 , UpperCAmelCase_ : Union[str, Any]="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=512 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : Tuple=0.02 , UpperCAmelCase_ : Tuple=1E-12 , UpperCAmelCase_ : int=1 , UpperCAmelCase_ : List[str]=0 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Tuple="absolute" , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : Optional[Any] , ):
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = vocab_size
lowerCAmelCase : Tuple = hidden_size
lowerCAmelCase : Tuple = num_hidden_layers
lowerCAmelCase : str = num_attention_heads
lowerCAmelCase : str = hidden_act
lowerCAmelCase : List[str] = intermediate_size
lowerCAmelCase : str = hidden_dropout_prob
lowerCAmelCase : Any = attention_probs_dropout_prob
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : int = type_vocab_size
lowerCAmelCase : Optional[Any] = initializer_range
lowerCAmelCase : int = layer_norm_eps
lowerCAmelCase : List[Any] = position_embedding_type
lowerCAmelCase : List[str] = use_cache
lowerCAmelCase : int = classifier_dropout
class __A ( lowerCAmelCase ):
@property
def lowercase__ ( self : Tuple ):
if self.task == "multiple-choice":
lowerCAmelCase : Any = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase : List[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 343
|
__A : Optional[Any] = '''Input must be a string of 8 digits plus one letter'''
__A : str = '''TRWAGMYFPDXBNJZSQVHLCKE'''
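# Spanish DNI check: the control letter is LOOKUP_LETTERS[number % 23].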
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> bool:
'''simple docstring'''
if not isinstance(_UpperCAmelCase, _UpperCAmelCase ):
lowerCAmelCase : str = f"Expected string as input, found {type(_UpperCAmelCase ).__name__}"
raise TypeError(_UpperCAmelCase )
lowerCAmelCase : Dict = spanish_id.replace('-', '' ).upper()
if len(_UpperCAmelCase ) != 9:
raise ValueError(_UpperCAmelCase )
try:
lowerCAmelCase : Tuple = int(spanish_id_clean[0:8] )
lowerCAmelCase : Union[str, Any] = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(_UpperCAmelCase ) from ex
if letter.isdigit():
raise ValueError(_UpperCAmelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 343
| 1
|
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class __A( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = jnp.floataa
SCREAMING_SNAKE_CASE__ = True
def UpperCAmelCase_ (self ):
super().setup()
UpperCamelCase__ = nn.Dense(5 , dtype=self.dtype )
def __call__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = super().__call__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class __A( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaxBigBirdForNaturalQuestionsModule
def __magic_name__ ( __a : Union[str, Any] , __a : Optional[Any] , __a : Union[str, Any] , __a : Dict , __a : Optional[Any] , __a : Optional[int] ):
'''simple docstring'''
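# The total loss is the mean of the start-position, end-position, and answer-category cross-entropies.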
def cross_entropy(__a : Optional[Any] , __a : List[str] , __a : str=None ):
UpperCamelCase__ = logits.shape[-1]
UpperCamelCase__ = (labels[..., None] == jnp.arange(__a )[None]).astype("""f4""" )
UpperCamelCase__ = jax.nn.log_softmax(__a , axis=-1 )
UpperCamelCase__ = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
UpperCamelCase__ = reduction(__a )
return loss
UpperCamelCase__ = partial(__a , reduction=jnp.mean )
UpperCamelCase__ = cross_entropy(__a , __a )
UpperCamelCase__ = cross_entropy(__a , __a )
UpperCamelCase__ = cross_entropy(__a , __a )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class __A:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = "google/bigbird-roberta-base"
SCREAMING_SNAKE_CASE__ = 3000
SCREAMING_SNAKE_CASE__ = 10500
SCREAMING_SNAKE_CASE__ = 128
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 5
# tx_args
SCREAMING_SNAKE_CASE__ = 3e-5
SCREAMING_SNAKE_CASE__ = 0.0
SCREAMING_SNAKE_CASE__ = 20000
SCREAMING_SNAKE_CASE__ = 0.0095
SCREAMING_SNAKE_CASE__ = "bigbird-roberta-natural-questions"
SCREAMING_SNAKE_CASE__ = "training-expt"
SCREAMING_SNAKE_CASE__ = "data/nq-training.jsonl"
SCREAMING_SNAKE_CASE__ = "data/nq-validation.jsonl"
def UpperCAmelCase_ (self ):
os.makedirs(self.base_dir , exist_ok=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = os.path.join(self.base_dir , self.save_dir )
UpperCamelCase__ = self.batch_size_per_device * jax.device_count()
@dataclass
class __A:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 4096 # no dynamic padding on TPUs
def __call__(self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.collate_fn(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jax.tree_util.tree_map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return batch
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ , UpperCamelCase__ = self.fetch_inputs(features["""input_ids"""] )
UpperCamelCase__ = {
"""input_ids""": jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa ),
"""attention_mask""": jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa ),
"""start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa ),
"""end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa ),
"""pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa ),
}
return batch
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = [self._fetch_inputs(SCREAMING_SNAKE_CASE_ ) for ids in input_ids]
return zip(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = [1 for _ in range(len(SCREAMING_SNAKE_CASE_ ) )]
while len(SCREAMING_SNAKE_CASE_ ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def __magic_name__ ( __a : List[Any] , __a : str , __a : Tuple=None ):
'''simple docstring'''
if seed is not None:
UpperCamelCase__ = dataset.shuffle(seed=__a )
for i in range(len(__a ) // batch_size ):
UpperCamelCase__ = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(__a )
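# One pmapped training step: compute loss and gradients on each device, then all-reduce over the batch axis.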
@partial(jax.pmap , axis_name="""batch""" )
def __magic_name__ ( __a : Any , __a : Dict , **__a : Union[str, Any] ):
'''simple docstring'''
def loss_fn(__a : Optional[Any] ):
UpperCamelCase__ = model_inputs.pop("""start_labels""" )
UpperCamelCase__ = model_inputs.pop("""end_labels""" )
UpperCamelCase__ = model_inputs.pop("""pooled_labels""" )
UpperCamelCase__ = state.apply_fn(**__a , params=__a , dropout_rng=__a , train=__a )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = outputs
return state.loss_fn(
__a , __a , __a , __a , __a , __a , )
UpperCamelCase__ , UpperCamelCase__ = jax.random.split(__a )
UpperCamelCase__ = jax.value_and_grad(__a )
UpperCamelCase__ , UpperCamelCase__ = grad_fn(state.params )
UpperCamelCase__ = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
UpperCamelCase__ = jax.lax.pmean(__a , """batch""" )
UpperCamelCase__ = state.apply_gradients(grads=__a )
return state, metrics, new_drp_rng
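# Evaluation step: same loss computation, but with no gradients or parameter updates.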
@partial(jax.pmap , axis_name="""batch""" )
def __magic_name__ ( __a : int , **__a : List[str] ):
'''simple docstring'''
UpperCamelCase__ = model_inputs.pop("""start_labels""" )
UpperCamelCase__ = model_inputs.pop("""end_labels""" )
UpperCamelCase__ = model_inputs.pop("""pooled_labels""" )
UpperCamelCase__ = state.apply_fn(**__a , params=state.params , train=__a )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = outputs
UpperCamelCase__ = state.loss_fn(__a , __a , __a , __a , __a , __a )
UpperCamelCase__ = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
return metrics
class __A( train_state.TrainState ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = struct.field(pytree_node=__lowerCamelCase )
@dataclass
class __A:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = None
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ):
UpperCamelCase__ = model.params
UpperCamelCase__ = TrainState.create(
apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , loss_fn=SCREAMING_SNAKE_CASE_ , )
if ckpt_dir is not None:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = restore_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = {
"""lr""": args.lr,
"""init_lr""": args.init_lr,
"""warmup_steps""": args.warmup_steps,
"""num_train_steps""": num_train_steps,
"""weight_decay""": args.weight_decay,
}
UpperCamelCase__ , UpperCamelCase__ = build_tx(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = train_state.TrainState(
step=SCREAMING_SNAKE_CASE_ , apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , opt_state=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase__ = args
UpperCamelCase__ = data_collator
UpperCamelCase__ = lr
UpperCamelCase__ = params
UpperCamelCase__ = jax_utils.replicate(SCREAMING_SNAKE_CASE_ )
return state
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.args
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ ) // args.batch_size
UpperCamelCase__ = jax.random.PRNGKey(0 )
UpperCamelCase__ = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count() )
for epoch in range(args.max_epochs ):
UpperCamelCase__ = jnp.array(0 , dtype=jnp.floataa )
UpperCamelCase__ = get_batched_dataset(SCREAMING_SNAKE_CASE_ , args.batch_size , seed=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = 0
for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc=F"Running EPOCH-{epoch}" ):
UpperCamelCase__ = self.data_collator(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.train_step_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
if i % args.logging_steps == 0:
UpperCamelCase__ = jax_utils.unreplicate(state.step )
UpperCamelCase__ = running_loss.item() / i
UpperCamelCase__ = self.scheduler_fn(state_step - 1 )
UpperCamelCase__ = self.evaluate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = {
"""step""": state_step.item(),
"""eval_loss""": eval_loss.item(),
"""tr_loss""": tr_loss,
"""lr""": lr.item(),
}
tqdm.write(str(SCREAMING_SNAKE_CASE_ ) )
self.logger.log(SCREAMING_SNAKE_CASE_ , commit=SCREAMING_SNAKE_CASE_ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F"-e{epoch}-s{i}" , state=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = get_batched_dataset(SCREAMING_SNAKE_CASE_ , self.args.batch_size )
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ ) // self.args.batch_size
UpperCamelCase__ = jnp.array(0 , dtype=jnp.floataa )
UpperCamelCase__ = 0
for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc="""Evaluating ... """ ):
UpperCamelCase__ = self.data_collator(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.val_step_fn(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
return running_loss / i
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = jax_utils.unreplicate(SCREAMING_SNAKE_CASE_ )
print(F"SAVING CHECKPOINT IN {save_dir}" , end=""" ... """ )
self.model_save_fn(SCREAMING_SNAKE_CASE_ , params=state.params )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , """opt_state.msgpack""" ) , """wb""" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(SCREAMING_SNAKE_CASE_ , """args.joblib""" ) )
joblib.dump(self.data_collator , os.path.join(SCREAMING_SNAKE_CASE_ , """data_collator.joblib""" ) )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , """training_state.json""" ) , """w""" ) as f:
json.dump({"""step""": state.step.item()} , SCREAMING_SNAKE_CASE_ )
print("""DONE""" )
def __magic_name__ ( __a : List[str] , __a : str ):
'''simple docstring'''
print(f"RESTORING CHECKPOINT FROM {save_dir}" , end=""" ... """ )
with open(os.path.join(__a , """flax_model.msgpack""" ) , """rb""" ) as f:
UpperCamelCase__ = from_bytes(state.params , f.read() )
with open(os.path.join(__a , """opt_state.msgpack""" ) , """rb""" ) as f:
UpperCamelCase__ = from_bytes(state.opt_state , f.read() )
UpperCamelCase__ = joblib.load(os.path.join(__a , """args.joblib""" ) )
UpperCamelCase__ = joblib.load(os.path.join(__a , """data_collator.joblib""" ) )
with open(os.path.join(__a , """training_state.json""" ) , """r""" ) as f:
UpperCamelCase__ = json.load(__a )
UpperCamelCase__ = training_state["""step"""]
print("""DONE""" )
return params, opt_state, step, args, data_collator
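# Learning-rate schedule: linear warmup to the peak rate, then linear decay towards zero.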
def __magic_name__ ( __a : Dict , __a : int , __a : Tuple , __a : Tuple ):
'''simple docstring'''
UpperCamelCase__ = num_train_steps - warmup_steps
UpperCamelCase__ = optax.linear_schedule(init_value=__a , end_value=__a , transition_steps=__a )
UpperCamelCase__ = optax.linear_schedule(init_value=__a , end_value=1E-7 , transition_steps=__a )
UpperCamelCase__ = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def __magic_name__ ( __a : int , __a : List[Any] , __a : int , __a : Optional[int] , __a : Optional[Any] ):
'''simple docstring'''
def weight_decay_mask(__a : Tuple ):
UpperCamelCase__ = traverse_util.flatten_dict(__a )
UpperCamelCase__ = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()}
return traverse_util.unflatten_dict(__a )
UpperCamelCase__ = scheduler_fn(__a , __a , __a , __a )
UpperCamelCase__ = optax.adamw(learning_rate=__a , weight_decay=__a , mask=__a )
return tx, lr
| 86
|
def __magic_name__ ( __a : str , __a : str ):
'''simple docstring'''
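# dp[i][j] is True when the first i chars of `a` can match the first j chars of `b`:
# an uppercased a[i] may consume b[j], and a lowercase a[i] may simply be skipped.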
UpperCamelCase__ = len(__a )
UpperCamelCase__ = len(__a )
UpperCamelCase__ = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
UpperCamelCase__ = True
for i in range(__a ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
UpperCamelCase__ = True
if a[i].islower():
UpperCamelCase__ = True
return dp[n][m]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
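
# Standalone check (illustrative, mirrors the doctests above): "daBcd" becomes
# "ABC" by capitalizing 'a' and 'c' and deleting both 'd's, while "dBcd"
# cannot, since the uppercase 'B' would have to match 'A'.
assert abbr("daBcd", "ABC") is True
assert abbr("dBcd", "ABC") is False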
| 86
| 1
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
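
# Illustrative sketch (not part of the test suite): constructing the scheduler
# directly and running a plain denoising loop, with random tensors standing in
# for a real UNet. The shapes and step count are arbitrary demo choices.
if __name__ == "__main__":
    from diffusers import DDIMParallelScheduler as _Sched

    _scheduler = _Sched(num_train_timesteps=1_000, beta_schedule="linear")
    _scheduler.set_timesteps(10)
    _sample = torch.randn(1, 3, 8, 8)
    for _t in _scheduler.timesteps:
        _residual = torch.randn_like(_sample)  # stands in for model(sample, t)
        _sample = _scheduler.step(_residual, _t, _sample, 0.0).prev_sample
    print(_sample.shape)  # torch.Size([1, 3, 8, 8])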
| 332
|
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """
    Depth estimation pipeline: predicts a depth map for the input image(s).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        # Resize the raw prediction back to the original (width, height) of the image.
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
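
# Minimal usage sketch (illustrative): running the pipeline through the
# `pipeline` factory. The checkpoint id "Intel/dpt-large" and the image URL
# are example values, not something fixed by this module.
if __name__ == "__main__":
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    print(result["predicted_depth"].shape)  # raw depth tensor
    result["depth"].save("depth.png")  # PIL image visualization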
| 332
| 1
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self, vocab_size=32_000, d_model=1_024, n_layer=24, n_head=16, d_inner=4_096, ff_activation="gelu",
        untie_r=True, attn_type="bi", initializer_range=0.02, layer_norm_eps=1e-12, dropout=0.1, mem_len=512,
        reuse_len=None, use_mems_eval=True, use_mems_train=False, bi_data=False, clamp_len=-1, same_length=False,
        summary_type="last", summary_use_proj=True, summary_activation="tanh", summary_last_dropout=0.1,
        start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 713
|
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1_000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 2
| 0
|
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below ``n``, using a sieve of Eratosthenes.

    >>> solution(10)
    17
    """
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1

    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
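
# Quick sanity check (illustrative): the primes below 10 are 2, 3, 5 and 7,
# which sum to 17, matching the doctest above.
assert solution(10) == 17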
| 80
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 640
| 0
|
DIGITS_SQUARED = [sum(int(c) ** 2 for c in str(i)) for i in range(100_000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10_000_000
CHAINS[57] = True  # 58 belongs to the chain that ends with 89
CHAINS[0] = False  # 1 belongs to the chain that ends with 1


def chain(number: int) -> bool:
    """Return True if ``number`` eventually reaches the 89-cycle."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    # Memoize every power-of-ten multiple that shares this chain.
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count how many starting numbers below ``number`` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(True)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
| 360
|
def solution(n: int = 100) -> int:
    """Return the difference between the sum of cubes and the sum of squares
    of the first ``n`` natural numbers, using their closed forms.
    """
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares


if __name__ == "__main__":
    print(f"{solution() = }")
| 360
| 1
|
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch iteration step ``steps`` times to the given polyline."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each segment by four segments forming the Koch 'bump'."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Draw the polyline described by ``vectors`` with equal axis scaling."""
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
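
# Quick sanity check (illustrative): rotating the unit x-vector by 90 degrees
# should give the unit y-vector, up to floating point error.
_rotated = rotate(numpy.array([1, 0]), 90)
assert numpy.allclose(_rotated, numpy.array([0, 1]))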
| 289
|
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel

api = HfApi()

results = {}
# fmt: off
_lowerCAmelCase : List[Any] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
_lowerCAmelCase : Any = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
_lowerCAmelCase : List[str] = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
_lowerCAmelCase : Optional[int] = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
_lowerCAmelCase : List[Any] = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
_lowerCAmelCase : Optional[int] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
_lowerCAmelCase : Optional[Any] = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
_lowerCAmelCase : List[str] = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
_lowerCAmelCase : Any = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
_lowerCAmelCase : int = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
_lowerCAmelCase : Union[str, Any] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
_lowerCAmelCase : str = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
_lowerCAmelCase : Optional[int] = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
_lowerCAmelCase : Union[str, Any] = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
_lowerCAmelCase : Any = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
| 289
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640,
        num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True,
        use_labels=True, num_labels=10, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __A ( self : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : Any ):
lowerCAmelCase_ : Optional[Any] =MobileViTModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase_ : Optional[Any] =model(UpperCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __A ( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : Any ):
lowerCAmelCase_ : List[str] =self.num_labels
lowerCAmelCase_ : Tuple =MobileViTForImageClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase_ : str =model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple ):
lowerCAmelCase_ : Optional[int] =self.num_labels
lowerCAmelCase_ : Any =MobileViTForSemanticSegmentation(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase_ : Union[str, Any] =model(UpperCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowerCAmelCase_ : Union[str, Any] =model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
def __A ( self : Dict ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViT does not use inputs_embeds''' )
def __A ( self : int ):
pass
@unittest.skip(reason='''MobileViT does not support input and output embeddings''' )
def __A ( self : Optional[int] ):
pass
@unittest.skip(reason='''MobileViT does not output attentions''' )
def __A ( self : List[str] ):
pass
def __A ( self : Tuple ):
lowerCAmelCase_ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Optional[Any] =model_class(UpperCamelCase_ )
lowerCAmelCase_ : List[str] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : List[str] =[*signature.parameters.keys()]
lowerCAmelCase_ : Tuple =['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self : Tuple ):
pass
def __A ( self : Optional[int] ):
lowerCAmelCase_ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def __A ( self : Dict ):
def check_hidden_states_output(UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] ):
lowerCAmelCase_ : Union[str, Any] =model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
with torch.no_grad():
lowerCAmelCase_ : List[str] =model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase_ : Dict =outputs.hidden_states
lowerCAmelCase_ : Union[str, Any] =5
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
lowerCAmelCase_ : Union[str, Any] =2
for i in range(len(UpperCamelCase_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
lowerCAmelCase_ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Any =True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ : List[str] =True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def __A ( self : int ):
lowerCAmelCase_ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
def __A ( self : Optional[Any] ):
lowerCAmelCase_ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase_ )
@slow
def __A ( self : List[Any] ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Any =MobileViTModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def SCREAMING_SNAKE_CASE__ ( ):
lowerCAmelCase_ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self : Dict ):
return MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''' ) if is_vision_available() else None
@slow
def __A ( self : Union[str, Any] ):
lowerCAmelCase_ : Tuple =MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''' ).to(UpperCamelCase_ )
lowerCAmelCase_ : Optional[Any] =self.default_image_processor
lowerCAmelCase_ : Optional[int] =prepare_img()
lowerCAmelCase_ : Union[str, Any] =image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase_ : int =model(**UpperCamelCase_ )
# verify the logits
lowerCAmelCase_ : Optional[int] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
lowerCAmelCase_ : List[Any] =torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) )
@slow
def __A ( self : Union[str, Any] ):
lowerCAmelCase_ : List[str] =MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
lowerCAmelCase_ : Tuple =model.to(UpperCamelCase_ )
lowerCAmelCase_ : Any =MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
lowerCAmelCase_ : int =prepare_img()
lowerCAmelCase_ : Optional[int] =image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase_ : Optional[Any] =model(**UpperCamelCase_ )
lowerCAmelCase_ : List[str] =outputs.logits
# verify the logits
lowerCAmelCase_ : Dict =torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , UpperCamelCase_ )
lowerCAmelCase_ : List[str] =torch.tensor(
[
[[6.9_7_1_3, 6.9_7_8_6, 7.2_4_2_2], [7.2_8_9_3, 7.2_8_2_5, 7.4_4_4_6], [7.6_5_8_0, 7.8_7_9_7, 7.9_4_2_0]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9_8_6_8, -9.7_1_3_2], [-11.0405, -11.0221, -10.7318]],
[[-3.3_0_8_9, -2.8_5_3_9, -2.6_7_4_0], [-3.2_7_0_6, -2.5_6_2_1, -2.5_1_0_8], [-3.2_5_3_4, -2.6_6_1_5, -2.6_6_5_1]],
] , device=UpperCamelCase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
@slow
def __A ( self : Tuple ):
lowerCAmelCase_ : Optional[int] =MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
lowerCAmelCase_ : str =model.to(UpperCamelCase_ )
lowerCAmelCase_ : int =MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
lowerCAmelCase_ : Union[str, Any] =prepare_img()
lowerCAmelCase_ : str =image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase_ : Optional[int] =model(**UpperCamelCase_ )
lowerCAmelCase_ : str =outputs.logits.detach().cpu()
lowerCAmelCase_ : Any =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_ , target_sizes=[(50, 60)] )
lowerCAmelCase_ : Optional[int] =torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , UpperCamelCase_ )
lowerCAmelCase_ : Tuple =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_ )
lowerCAmelCase_ : List[str] =torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , UpperCamelCase_ )
| 703
|
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self, vocab_size=49_408, hidden_size=512, intermediate_size=2_048, num_hidden_layers=12,
        num_attention_heads=8, max_position_embeddings=16, hidden_act="quick_gelu", layer_norm_eps=1e-5,
        attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=0,
        bos_token_id=49_406, eos_token_id=49_407, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self, hidden_size=768, intermediate_size=3_072, num_hidden_layers=12, num_attention_heads=12,
        num_channels=3, image_size=768, patch_size=32, hidden_act="quick_gelu", layer_norm_eps=1e-5,
        attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        """
        Instantiate an OwlViTConfig (or a derived class) from owlvit text model
        and owlvit vision model configuration dictionaries.
        """
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config

        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
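
# Minimal usage sketch (illustrative): composing an OwlViTConfig from separate
# text and vision dicts. The override values below are arbitrary demo choices.
if __name__ == "__main__":
    demo_config = OwlViTConfig.from_text_vision_configs(
        text_config={"hidden_size": 512}, vision_config={"patch_size": 32}
    )
    print(demo_config.text_config.hidden_size, demo_config.vision_config.patch_size)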
| 305
| 0
|
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get a whitespace-insensitive hash of the content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}
def line_stats(example):
    """Calculate mean and max line length of the file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    """Check if the current hash is still in the set of unique hashes; remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False
def is_autogenerated(example, scan_width=5):
    """Check if the file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if the file is a configuration file or a unit test."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test: a keyword match in the first few lines
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test: frequency of "config" and "test" relative to file length
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    """Check if a Python file contains none of the keywords for functions, classes, or loops."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    """Check if the file uses the '=' symbol fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function so the dataset cache is filled only once."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def filter(example, uniques, args):
    """Filter the dataset with the uniqueness check and quality heuristics."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def compress_file(file_path):
    """Compress a file with gzip and remove the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __magic_name__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"]
    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                # First step: pick the top-k continuations and replicate the prefix per beam.
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # Subsequent steps: length-normalize cumulative scores and re-rank beams.
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
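
# Illustrative sketch (not part of the original file; dimensions and the eos id are
# hypothetical): the decoder projects a prefix embedding into GPT-2's embedding
# space and decodes a caption from it with the beam search above.
def _text_decoder_sketch():
    decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768)
    prefix = torch.randn(1, 77, 768)  # one prefix of 77 vectors matching n_embd=768
    tokens, lengths = decoder.generate_captions(prefix, eos_token_id=50256, device="cpu")
    return tokens, lengths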
"""Tokenization tests for DistilBERT, reusing the BERT tokenization test suite."""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
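
# Illustrative note (not from the test file): the pair layout asserted above is
#   [CLS] sequence builders [SEP] multi-sequence build [SEP]
# i.e. one leading [CLS] and a single trailing [SEP] per segment.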
"""Project Euler style solution: sum the numbers in num.txt and return the first ten digits."""
import os


def solution() -> str:
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
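
# Minimal sketch of the same truncation idea on made-up data (not from the source):
def _digit_sum_sketch() -> str:
    numbers = ["11111111111111111111", "22222222222222222222"]  # hypothetical values
    return str(sum(int(n) for n in numbers))[:10]  # -> "3333333333", the first ten digits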
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    """Freeze a module's parameters so they are excluded from backpropagation."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Pick the best available torch device, with a warning for MPS."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img):
    """Display a PIL image without axis ticks."""
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
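
# Illustrative usage sketch (not part of the original file; the model is hypothetical):
def _utils_usage_sketch():
    device = get_device()
    model = torch.nn.Linear(4, 4).to(device)
    freeze_module(model)  # all parameters now have requires_grad == False
    return get_timestamp()  # e.g. "14:03:59"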
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """
    Test case for verifying the `accelerate launch` CLI operates correctly.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    """
    Test case for verifying that `accelerate tpu-config` builds the right `gcloud` command.
    """

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo'
            ' "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World"'
            " --worker all",
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo'
            ' "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo'
            ' "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U;'
            ' echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0;'
            ' echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
"""Tokenization classes for RemBERT."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable, so it is dropped from the state.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        """Tokenize a string."""
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
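
# Illustrative usage sketch (not part of the original file; the checkpoint name
# comes from the pretrained vocab map above and requires network access):
def _rembert_tokenizer_sketch():
    tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
    ids = tokenizer("Hello world")["input_ids"]  # [CLS] ... [SEP] layout
    return tokenizer.convert_ids_to_tokens(ids)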
"""Tokenization tests for XLM-RoBERTa (slow and fast tokenizers)."""
import pickle
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowerCamelCase : Tuple = {"""input_ids""": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = __lowerCamelCase  # the expected-encoding literal assigned above

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
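
# Illustrative note (inferred from the `fairseq_offset` usage in the tests above):
# the slow XLM-R tokenizer shifts raw SentencePiece ids by `fairseq_offset` (1) so
# that ids 0-3 can hold <s>, <pad>, </s>, <unk>:
#
#   hf_id = tokenizer.sp_model.PieceToId(piece) + tokenizer.fairseq_offset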