| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
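A minimal usage sketch of the lazy module above, assuming it lives in transformers.models.layoutxlm as in the upstream library and that sentencepiece is installed (the checkpoint name is a real published one, but illustrative here); the heavy tokenizer modules are imported only when the attribute is first touched:

# Usage sketch (assumes the transformers package; checkpoint name illustrative).
from transformers import LayoutXLMProcessor  # resolved lazily through _LazyModule

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")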
| code_codestyle: 120 |
"""A simple neural network with two hidden layers, trained by backpropagation."""
import numpy


class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights, where the first dimension is the number of nodes
        # in the previous layer and the second is the number of nodes in the next
        # layer. self.input_array.shape[1] is the number of nodes in the input
        # layer; the first hidden layer has 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # First hidden layer has 4 nodes, second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Second hidden layer has 3 nodes, output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values; initially all zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        # Layer connecting the input with the first hidden set of nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # Layer connecting the first hidden set of nodes with the second.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # Layer connecting the second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        # Gradient of the loss with respect to each weight matrix, computed by
        # the chain rule from the output layer back to the input layer.
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the output is predicted.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    # Derivative of the sigmoid expressed in terms of its output.
    return value * (1 - value)


def example() -> int:
    # Input values (all 3-bit combinations).
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output
    )
    # Set give_loss to True if you want to see the loss at every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array([1, 1, 1], dtype=numpy.float64))


if __name__ == "__main__":
    example()
| style_context_codestyle: 664 | label: 0 |
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
print(f"Vertex\tShortest Distance from vertex {src}" )
for i, d in enumerate(UpperCamelCase_ ):
print(f"{i}\t\t{d}" )
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
for j in range(UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
return True
return False
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = [float("""inf""" )] * vertex_count
__SCREAMING_SNAKE_CASE = 0.0
for _ in range(vertex_count - 1 ):
for j in range(UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
__SCREAMING_SNAKE_CASE = distance[u] + w
__SCREAMING_SNAKE_CASE = check_negative_cycle(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if negative_cycle_exists:
raise Exception("""Negative cycle found""" )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = int(input("Enter number of vertices: ").strip())
__magic_name__ = int(input("Enter number of edges: ").strip())
__magic_name__ = [{} for _ in range(E)]
for i in range(E):
print("Edge ", i + 1)
__magic_name__, __magic_name__, __magic_name__ = (
int(x)
for x in input("Enter source, destination, weight: ").strip().split(" ")
)
__magic_name__ = {"src": src, "dst": dest, "weight": weight}
__magic_name__ = int(input("\nEnter shortest path source:").strip())
__magic_name__ = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
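A non-interactive sketch of the same API, showing the edge-dict format the functions expect (edge weights chosen arbitrarily for illustration):

# Usage sketch: 3 vertices, 3 edges, source vertex 0.
example_graph = [
    {"src": 0, "dst": 1, "weight": 2},
    {"src": 1, "dst": 2, "weight": -1},
    {"src": 0, "dst": 2, "weight": 4},
]
print(bellman_ford(example_graph, 3, 3, 0))  # [0.0, 2.0, 1.0]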
| code_codestyle: 248 |
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
__magic_name__ = threading.Lock()
__magic_name__ = None
__magic_name__ = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
__magic_name__ = logging.WARNING
__magic_name__ = True
def _lowerCAmelCase ( ):
__SCREAMING_SNAKE_CASE = os.getenv("""TRANSFORMERS_VERBOSITY""" , UpperCamelCase_ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
f"has to be one of: { ', '.join(log_levels.keys() ) }" )
return _default_log_level
def _lowerCAmelCase ( ):
return __name__.split(""".""" )[0]
def _lowerCAmelCase ( ):
return logging.getLogger(_get_library_name() )
def _lowerCAmelCase ( ):
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
__SCREAMING_SNAKE_CASE = logging.StreamHandler() # Set sys.stderr as stream.
__SCREAMING_SNAKE_CASE = sys.stderr.flush
# Apply our default configuration to the library root logger.
__SCREAMING_SNAKE_CASE = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
__SCREAMING_SNAKE_CASE = False
def _lowerCAmelCase ( ):
global _default_handler
with _lock:
if not _default_handler:
return
__SCREAMING_SNAKE_CASE = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
__SCREAMING_SNAKE_CASE = None
def _lowerCAmelCase ( ):
return log_levels
def _lowerCAmelCase ( UpperCamelCase_ = None ):
if name is None:
__SCREAMING_SNAKE_CASE = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(UpperCamelCase_ )
def _lowerCAmelCase ( ):
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def _lowerCAmelCase ( UpperCamelCase_ ):
_configure_library_root_logger()
_get_library_root_logger().setLevel(UpperCamelCase_ )
def _lowerCAmelCase ( ):
return set_verbosity(UpperCamelCase_ )
def _lowerCAmelCase ( ):
return set_verbosity(UpperCamelCase_ )
def _lowerCAmelCase ( ):
return set_verbosity(UpperCamelCase_ )
def _lowerCAmelCase ( ):
return set_verbosity(UpperCamelCase_ )
def _lowerCAmelCase ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def _lowerCAmelCase ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def _lowerCAmelCase ( UpperCamelCase_ ):
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(UpperCamelCase_ )
def _lowerCAmelCase ( UpperCamelCase_ ):
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(UpperCamelCase_ )
def _lowerCAmelCase ( ):
_configure_library_root_logger()
__SCREAMING_SNAKE_CASE = False
def _lowerCAmelCase ( ):
_configure_library_root_logger()
__SCREAMING_SNAKE_CASE = True
def _lowerCAmelCase ( ):
__SCREAMING_SNAKE_CASE = _get_library_root_logger().handlers
for handler in handlers:
__SCREAMING_SNAKE_CASE = logging.Formatter("""[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s""" )
handler.setFormatter(UpperCamelCase_ )
def _lowerCAmelCase ( ):
__SCREAMING_SNAKE_CASE = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(UpperCamelCase_ )
def _lowerCAmelCase ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = os.getenv("""TRANSFORMERS_NO_ADVISORY_WARNINGS""" , UpperCamelCase_ )
if no_advisory_warnings:
return
self.warning(*UpperCamelCase_ , **UpperCamelCase_ )
__magic_name__ = warning_advice
@functools.lru_cache(UpperCamelCase_ )
def _lowerCAmelCase ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
self.warning(*UpperCamelCase_ , **UpperCamelCase_ )
__magic_name__ = warning_once
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__): # pylint: disable=unused-argument
__SCREAMING_SNAKE_CASE = args[0] if args else None
def __iter__( self):
return iter(self._iterator)
def __getattr__( self , lowerCAmelCase__):
def empty_fn(*lowerCAmelCase__ , **lowerCAmelCase__): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self):
return self
def __exit__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
return
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __call__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
if _tqdm_active:
return tqdm_lib.tqdm(*lowerCAmelCase__ , **lowerCAmelCase__)
else:
return EmptyTqdm(*lowerCAmelCase__ , **lowerCAmelCase__)
def snake_case_ ( self , *lowerCAmelCase__ , **lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*lowerCAmelCase__ , **lowerCAmelCase__)
def snake_case_ ( self):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__magic_name__ = _tqdm_cls()
def _lowerCAmelCase ( ):
global _tqdm_active
return bool(_tqdm_active )
def _lowerCAmelCase ( ):
global _tqdm_active
__SCREAMING_SNAKE_CASE = True
hf_hub_utils.enable_progress_bars()
def _lowerCAmelCase ( ):
global _tqdm_active
__SCREAMING_SNAKE_CASE = False
hf_hub_utils.disable_progress_bars()
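A short usage sketch of the public helpers above, mirroring how transformers.utils.logging is typically used:

# Usage sketch: obtain a module logger and raise the library verbosity.
logger = get_logger(__name__)
set_verbosity_info()
logger.info("This message is now visible at INFO level.")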
| style_context_codestyle: 248 | label: 1 |
"""I-BERT model configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
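A quick sketch instantiating the config; the defaults mirror the signature above, and the single override is illustrative:

# Usage sketch: default I-BERT config plus one illustrative override.
config = IBertConfig(quant_mode=True)
print(config.model_type, config.hidden_size, config.quant_mode)  # ibert 768 True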
| code_codestyle: 161 |
"""Processor class for BridgeTower, wrapping an image processor and a Roberta tokenizer."""
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
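A minimal sketch of calling the processor, assuming a published BridgeTower checkpoint (name illustrative) and a local image file; the returned encoding combines tokenizer and image-processor outputs:

# Usage sketch (checkpoint and file path illustrative; requires Pillow).
from PIL import Image

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
inputs = processor(images=Image.open("example.jpg"), text="a photo caption", return_tensors="pt")
print(sorted(inputs.keys()))  # e.g. attention_mask, input_ids, pixel_mask, pixel_values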
| style_context_codestyle: 161 | label: 1 |
"""A Radix Tree (compressed trie) implementation."""
from __future__ import annotations


class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of a child's prefix to that child node.
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Return (common substring, remaining prefix, remaining word)."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: The word equals the node prefix
        # Solution: We mark the current node as a leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
| code_codestyle: 721 |
"""LZW decompression: recover the original bit string from an LZW-compressed file."""
import math
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress the given bit string using the Lempel-Ziv-Welch algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # When the lexicon size crosses a power of two, widen all keys by one bit.
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string to the file, padding the final byte."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strip the length prefix written by the matching compressor."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    # Drop the leading zeros, then the terminating "1" and the length field.
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, decompress it and write the result (CLI entry point)."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
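The module is meant to be driven from the command line; below is a small sanity sketch of the decoder itself, where the expected value follows from hand-tracing decompress_data on a single literal bit:

# CLI usage (paths illustrative): python decompress.py compressed.lzw restored.txt
# Decoder sanity check: a single literal bit decodes to itself.
assert decompress_data("0") == "0"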
| style_context_codestyle: 343 | label: 0 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    """
    Return the gross price after applying a tax rate.

    >>> price_plus_tax(100, 0.25)
    125.0
    >>> price_plus_tax(125.50, 0.05)
    131.775
    """
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
| code_codestyle: 94 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        # Stack a list of same-shape, same-dtype tensors into one tensor.
        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
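In practice this formatter is selected through the datasets API rather than instantiated directly; a sketch assuming the datasets library is installed:

# Usage sketch via the datasets API, which routes "torch" to this formatter.
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("torch")
print(type(ds[0]["x"]))  # <class 'torch.Tensor'>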
| style_context_codestyle: 243 | label: 0 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Decode raw audio-file bytes through ffmpeg into a mono float32 waveform."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Stream raw microphone bytes through ffmpeg in chunks of chunk_length_s seconds."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Stream microphone audio as overlapping numpy chunks with stride metadata."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Rechunk a byte iterator into chunk_len pieces with left/right strides."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal generator yielding buflen-sized byte reads from an ffmpeg process."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| code_codestyle: 718 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case = "cpu" , snake_case = None ) -> None:
_UpperCAmelCase = torch.load(snake_case , map_location=snake_case )
for k, v in tqdm(state_dict.items() ):
if not isinstance(snake_case , torch.Tensor ):
raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" )
_UpperCAmelCase = v.half()
if save_path is None: # overwrite src_path
_UpperCAmelCase = src_path
torch.save(snake_case , snake_case )
if __name__ == "__main__":
fire.Fire(convert)
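Because the script exposes the function through fire.Fire, it is usually run from the shell; a sketch with illustrative paths:

# Shell usage sketch (script name and paths illustrative):
#     python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin
# Equivalent direct call:
# convert("pytorch_model.bin", save_path="pytorch_model.fp16.bin")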
| style_context_codestyle: 175 | label: 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| code_codestyle: 53 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """Copy/paste/tweak the original DINO weights into our ViT structure."""
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dino_vitb16",
        type=str,
        help="Name of the model trained with DINO you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--base_model",
        action="store_true",
        help="Whether to only convert the base model (no projection head weights).",
    )
    parser.set_defaults(base_model=True)
    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
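A usage sketch for the conversion script above (the script file name is illustrative; the flags come from the parser it defines):

# Shell usage sketch (script file name illustrative):
#     python convert_dino_to_vit.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino_vitb16
# Equivalent direct call:
# convert_vit_checkpoint("dino_vitb16", "./dino_vitb16", base_model=True)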
| style_context_codestyle: 53 | label: 1 |
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]

        # fmt: off
        expected_encoding = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
| code_codestyle: 718 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : Optional[int] = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
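

# Hedged usage sketch for the two classes above (names as defined here, not
# guaranteed to match any installed package): build a config and inspect the
# dynamic ONNX axes exposed by the export config.
#
#   config = RobertaConfig(vocab_size=50265, num_hidden_layers=6)
#   onnx_config = RobertaOnnxConfig(config)
#   print(onnx_config.inputs)  # OrderedDict with batch/sequence dynamic axes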
| 169
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 98
|
'''simple docstring'''
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
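

# Hedged round-trip sketch for the routines above: for any key in
# [2, len(message) - 1], decryption should invert encryption.
def _cipher_round_trip_demo() -> None:
    plaintext = "Common sense is not so common."
    ciphertext = encrypt_message(8, plaintext)
    assert decrypt_message(8, ciphertext) == plaintext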
| 75
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 707
|
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)

        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)

        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)

        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )
return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())

                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
@slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
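

# Hedged note on the integration test above: LayoutLMv3 expects one
# (x0, y0, x1, y1) box per text token, and the final hidden states cover text
# tokens + image patches + CLS; with a 224x224 image and 16x16 patches that is
# 2 + 196 + 1 = 199 positions, matching the (1, 199, 768) expected shape.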
| 200
| 0
|
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
_overwrite_items = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
_delete_items = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
_access_absent_items = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(py) == set(my)
        assert len(py) == len(my)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_exposed():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
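

# Hedged usage sketch for the HashMap exercised above (dict-like protocol):
#   hm = HashMap(initial_block_size=4)
#   hm["key_a"] = "val_a"
#   assert hm["key_a"] == "val_a"
#   del hm["key_a"]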
| 20
|
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
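

# Illustrative invocation of the command registered above (the model name is
# just an example):
#   transformers-cli run --task text-classification --input data.csv \
#       --output preds.json --model distilbert-base-uncased-finetuned-sst-2-english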
| 477
| 0
|
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
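
# Illustrative invocation (the script path is an assumption; adjust to where
# this file actually lives):
#   python utils/check_tf_ops.py --saved_model_path ./saved_model.pb --opset 12 --strict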
| 704
|
'''simple docstring'''
import os
def solution():
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
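

# Hedged sanity sketch: the same horizontal scan on a tiny 4x4 grid, where the
# best product of four adjacent numbers is the full first row.
def _tiny_grid_demo() -> int:
    grid = [
        [1, 2, 3, 4],
        [1, 1, 1, 1],
        [1, 1, 1, 1],
        [1, 1, 1, 1],
    ]
    best = 0
    for row in grid:  # horizontal windows of length 4
        best = max(best, row[0] * row[1] * row[2] * row[3])
    return best  # 24: the first row dominates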
| 340
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )
    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )
    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
@slow
    def test_autoregressive_prediction(self):
        """
        An integration test that performs autoregressive prediction of state, action and return
        from a sequence of state, actions and returns. Test is performed over two timesteps.
        """

        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)

            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                _, action_pred, _ = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, done, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
| 69
|
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 395
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 63
|
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
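

# Hedged usage sketch for the classes above: project a feature vector to
# StudentT parameters and draw a sample from the resulting distribution.
def _student_t_head_demo() -> None:
    dist_output = StudentTOutput(dim=1)
    proj = dist_output.get_parameter_projection(in_features=8)
    features = torch.randn(4, 8)
    distr_args = proj(features)  # (df, loc, scale), each of shape (4,)
    distribution = dist_output.distribution(distr_args)
    assert distribution.sample().shape == (4,)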
| 63
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
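

# Hedged usage sketch (the BertConfig import is illustrative; any two
# PretrainedConfig instances work):
#   from transformers import BertConfig
#   enc, dec = BertConfig(), BertConfig()
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention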
| 302
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)
    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        """Called to initialize data. Use the call to construct features."""
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode, batch_size, shuffle=False) -> DataLoader:
        """Load datasets. Called after prepare data."""

        # We test on the dev set to compare to benchmarks without test set submission
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )
    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )

        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )

        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )

        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
| 141
| 0
|
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
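# A minimal usage sketch, assuming the public TvltProcessor.from_pretrained
# API; the checkpoint id and the input shapes below are illustrative
# assumptions:
#
#   import numpy as np
#   from transformers import TvltProcessor
#
#   processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#   video = list(np.random.rand(8, 3, 224, 224))  # 8 RGB frames
#   audio = list(np.random.rand(10_000))          # raw waveform samples
#   batch = processor(images=video, audio=audio, sampling_rate=44_100, return_tensors="pt")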
| 130
|
import math
def decimal_to_octal(num: int) -> str:
    """Convert a non-negative decimal integer to its octal string form."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")
if __name__ == "__main__":
main()
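# Hedged sanity check for the converter above; Python's built-in oct() gives
# the reference string, so the two should agree for any positive input:
def _check_against_builtin() -> None:
    for n in (2, 8, 65, 216, 512):
        assert decimal_to_octal(n) == oct(n), (n, decimal_to_octal(n), oct(n))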
| 130
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__ ( _a , _a , unittest.TestCase ):
a : Any = IFPipeline
a : str = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
a : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
a : int = PipelineTesterMixin.required_optional_params - {"""latents"""}
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
return self._get_dummy_components()
def SCREAMING_SNAKE_CASE_ ( self : Any , A_ : int , A_ : Dict=0 ):
'''simple docstring'''
if str(A_ ).startswith("""mps""" ):
__lowercase = torch.manual_seed(A_ )
else:
__lowercase = torch.Generator(device=A_ ).manual_seed(A_ )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
self._test_save_load_local()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
__lowercase = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=A_ , tokenizer=A_ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
__lowercase , __lowercase = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
__lowercase = None
__lowercase = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(A_ , A_ , A_ , A_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
__lowercase = IFImgaImgPipeline(**pipe_a.components )
__lowercase = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(A_ , A_ , A_ , A_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
__lowercase = IFInpaintingPipeline(**pipe_a.components )
__lowercase = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(A_ , A_ , A_ , A_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , A_ : Any , A_ : int , A_ : str , A_ : Dict ):
'''simple docstring'''
_start_torch_memory_measurement()
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , num_inference_steps=2 , generator=A_ , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(A_ , A_ )
# pipeline 2
_start_torch_memory_measurement()
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A_ )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , generator=A_ , num_inference_steps=2 , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(A_ , A_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , A_ : Tuple , A_ : List[Any] , A_ : List[Any] , A_ : Any ):
'''simple docstring'''
_start_torch_memory_measurement()
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A_ )
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , num_inference_steps=2 , generator=A_ , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(A_ , A_ )
# pipeline 2
_start_torch_memory_measurement()
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(A_ )
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A_ )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , original_image=A_ , generator=A_ , num_inference_steps=2 , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(A_ , A_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , A_ : List[Any] , A_ : str , A_ : List[Any] , A_ : List[Any] ):
'''simple docstring'''
_start_torch_memory_measurement()
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A_ )
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(A_ )
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , mask_image=A_ , num_inference_steps=2 , generator=A_ , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(A_ , A_ )
# pipeline 2
_start_torch_memory_measurement()
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A_ )
__lowercase = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(A_ )
__lowercase = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(A_ )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , mask_image=A_ , original_image=A_ , generator=A_ , num_inference_steps=2 , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(A_ , A_ )
def lowerCAmelCase_ ( ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 616
| 1
|
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """Build a GHZ circuit on `qubits` qubits and return the measurement counts."""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate, entangling qubit i with qubit i - 1
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(f"""Total count for various states are: {quantum_entanglement(3)}""")
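# Minimal sketch of the expected measurement statistics: a GHZ state collapses
# to either all zeros or all ones, so only those two bitstrings should appear
# among the 1000 shots (their exact split varies from run to run):
def _check_ghz_counts(qubits: int = 3) -> None:
    counts = quantum_entanglement(qubits)
    assert set(counts) <= {"0" * qubits, "1" * qubits}
    assert sum(counts.values()) == 1000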
| 321
|
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=30 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=32 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=10 , _lowerCAmelCase=0.02 , _lowerCAmelCase=None , ):
a =parent
a =batch_size
a =image_size
a =patch_size
a =num_channels
a =is_training
a =use_labels
a =hidden_size
a =num_hidden_layers
a =num_attention_heads
a =intermediate_size
a =hidden_act
a =hidden_dropout_prob
a =attention_probs_dropout_prob
a =type_sequence_label_size
a =initializer_range
a =scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a =(image_size // patch_size) ** 2
a =num_patches + 1
def lowerCAmelCase__ ( self ):
a =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a =None
if self.use_labels:
a =ids_tensor([self.batch_size] , self.type_sequence_label_size )
a =self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
a =ViTMSNModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
a =model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
a =self.type_sequence_label_size
a =ViTMSNForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
a =model(_lowerCAmelCase , labels=_lowerCAmelCase )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a =1
a =ViTMSNForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
a =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a =model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ ( self ):
a =self.prepare_config_and_inputs()
a , a , a =config_and_inputs
a ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : int = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE : str = (
{"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE : Tuple = False
_SCREAMING_SNAKE_CASE : str = False
_SCREAMING_SNAKE_CASE : Optional[Any] = False
_SCREAMING_SNAKE_CASE : List[str] = False
def lowerCAmelCase__ ( self ):
a =ViTMSNModelTester(self )
a =ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def lowerCAmelCase__ ( self ):
pass
def lowerCAmelCase__ ( self ):
a , a =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a =model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def lowerCAmelCase__ ( self ):
a , a =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a =model_class(_lowerCAmelCase )
a =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a =[*signature.parameters.keys()]
a =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def lowerCAmelCase__ ( self ):
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase__ ( self ):
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a =ViTMSNModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def lowerCamelCase ( )-> Dict:
"""simple docstring"""
a =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self ):
torch.manual_seed(2 )
a =ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(_lowerCAmelCase )
a =self.default_image_processor
a =prepare_img()
a =image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
a =model(**_lowerCAmelCase )
# verify the logits
a =torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
a =torch.tensor([-0.08_03, -0.44_54, -0.23_75] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
| 321
| 1
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_lowerCamelCase = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
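# Migration sketch (the checkpoint id is illustrative): code that previously did
#   feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-large")
# can switch to
#   image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
# unchanged otherwise, since this subclass only adds the deprecation warning.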
| 6
|
from math import ceil
def solution(n: int = 1001) -> int:
    """Sum of the numbers on the diagonals of an n x n clockwise spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        # each ring contributes four odd-square corners minus a 6 * even offset
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
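# Quick sanity check of the ring formula: for a 5x5 spiral the diagonal
# numbers are 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101.
def _check_small_spirals() -> None:
    assert solution(3) == 25  # 1 + 3 + 5 + 7 + 9
    assert solution(5) == 101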
| 230
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
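# With this pattern `import transformers.models.vit_msn` stays cheap: the
# torch-gated classes listed above are only imported, and the availability
# check only re-run, when a symbol such as ViTMSNModel is first accessed
# through the lazy module.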
| 721
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = StableDiffusionXLImgaImgPipeline
lowerCAmelCase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
lowerCAmelCase : List[str] = PipelineTesterMixin.required_optional_params - {"latents"}
lowerCAmelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') ,attention_head_dim=(2, 4) ,use_linear_projection=_snake_case ,addition_embed_type='''text_time''' ,addition_time_embed_dim=8 ,transformer_layers_per_block=(1, 2) ,projection_class_embeddings_input_dim=80 ,cross_attention_dim=64 ,)
lowercase__ : Union[str, Any] = EulerDiscreteScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,steps_offset=1 ,beta_schedule='''scaled_linear''' ,timestep_spacing='''leading''' ,)
torch.manual_seed(0 )
lowercase__ : str = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,sample_size=128 ,)
torch.manual_seed(0 )
lowercase__ : List[str] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,hidden_act='''gelu''' ,projection_dim=32 ,)
lowercase__ : Optional[Any] = CLIPTextModel(_snake_case )
lowercase__ : Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ,local_files_only=_snake_case )
lowercase__ : Tuple = CLIPTextModelWithProjection(_snake_case )
lowercase__ : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ,local_files_only=_snake_case )
lowercase__ : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def UpperCAmelCase ( self : List[str] ,_snake_case : int ,_snake_case : Any=0 ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : int = floats_tensor((1, 3, 32, 32) ,rng=random.Random(_snake_case ) ).to(_snake_case )
lowercase__ : Tuple = image / 2 + 0.5
if str(_snake_case ).startswith('''mps''' ):
lowercase__ : int = torch.manual_seed(_snake_case )
else:
lowercase__ : Optional[Any] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowercase__ : int = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.75,
}
return inputs
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
lowercase__ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ : Dict = self.get_dummy_components()
lowercase__ : Tuple = StableDiffusionXLImgaImgPipeline(**_snake_case )
lowercase__ : Dict = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : Dict = self.get_dummy_inputs(_snake_case )
lowercase__ : Dict = sd_pipe(**_snake_case ).images
lowercase__ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ : Optional[int] = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def UpperCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
pass
def UpperCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowercase__ : int = self.get_dummy_components()
lowercase__ : Any = StableDiffusionXLImgaImgPipeline(**_snake_case )
lowercase__ : int = sd_pipe.to(_snake_case )
lowercase__ : List[Any] = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
# forward without prompt embeds
lowercase__ : Tuple = self.get_dummy_inputs(_snake_case )
lowercase__ : List[str] = 3 * ['''this is a negative prompt''']
lowercase__ : List[str] = negative_prompt
lowercase__ : Union[str, Any] = 3 * [inputs['''prompt''']]
lowercase__ : List[Any] = sd_pipe(**_snake_case )
lowercase__ : Any = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowercase__ : Optional[int] = self.get_dummy_inputs(_snake_case )
lowercase__ : List[str] = 3 * ['''this is a negative prompt''']
lowercase__ : List[str] = 3 * [inputs.pop('''prompt''' )]
        lowercase__ , lowercase__ , lowercase__ , lowercase__ = sd_pipe.encode_prompt(_snake_case ,negative_prompt=_snake_case )
lowercase__ : Tuple = sd_pipe(
**_snake_case ,prompt_embeds=_snake_case ,negative_prompt_embeds=_snake_case ,pooled_prompt_embeds=_snake_case ,negative_pooled_prompt_embeds=_snake_case ,)
lowercase__ : Union[str, Any] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : int ) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self : Any ,_snake_case : int ,_snake_case : Any="cpu" ,_snake_case : List[str]=torch.floataa ,_snake_case : Union[str, Any]=0 ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowercase__ : Union[str, Any] = np.random.RandomState(_snake_case ).standard_normal((1, 4, 64, 64) )
lowercase__ : int = torch.from_numpy(_snake_case ).to(device=_snake_case ,dtype=_snake_case )
lowercase__ : List[Any] = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
lowercase__ : Dict = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : Tuple = self.get_inputs(_snake_case )
lowercase__ : Union[str, Any] = pipe(**_snake_case ).images
lowercase__ : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ : List[str] = np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 122
| 0
|
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""")
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
    logger.info(f"""Loading tokenizer classes: {tokenizer_names}""")
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""")
        for checkpoint in checkpoint_names:
            logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""")
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""")
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(f"""=> File names {file_names}""")
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"""=> removing {file_name}""")
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
__magic_name__ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
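# Example invocations using only the flags defined above (the module file name
# and the tokenizer/checkpoint ids are illustrative assumptions):
#
#   python convert_tokenizers.py --dump_path ./fast_tokenizers
#   python convert_tokenizers.py --dump_path ./fast_tokenizers \
#       --tokenizer_name BertTokenizer --checkpoint_name bert-base-uncased --force_download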
| 276
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowerCamelCase , lowerCamelCase = 13 , lowerCamelCase = 64 , lowerCamelCase = 2 , lowerCamelCase = 3 , lowerCamelCase = 3 , lowerCamelCase = True , lowerCamelCase = True , lowerCamelCase = 1_28 , lowerCamelCase=[16, 32, 64, 1_28] , lowerCamelCase = 7 , lowerCamelCase = 4 , lowerCamelCase = 37 , lowerCamelCase = "gelu" , lowerCamelCase = 0.1 , lowerCamelCase = 0.1 , lowerCamelCase = 10 , lowerCamelCase = 0.0_2 , lowerCamelCase = 2 , lowerCamelCase = 1 , lowerCamelCase = 1_28 , lowerCamelCase = [2, 2, 2, 2] , lowerCamelCase = 2 , lowerCamelCase = 2 , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = image_size
snake_case__ = patch_size
snake_case__ = num_channels
snake_case__ = is_training
snake_case__ = use_labels
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = type_sequence_label_size
snake_case__ = initializer_range
snake_case__ = encoder_stride
snake_case__ = num_attention_outputs
snake_case__ = embed_dim
snake_case__ = embed_dim + 1
snake_case__ = resolution
snake_case__ = depths
snake_case__ = hidden_sizes
snake_case__ = dim
snake_case__ = mlp_expansion_ratio
def A_ ( self ):
snake_case__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ = None
if self.use_labels:
snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ = self.get_config()
return config, pixel_values, labels
def A_ ( self ):
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def A_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
snake_case__ = TFEfficientFormerModel(config=lowerCamelCase )
snake_case__ = model(lowerCamelCase , training=lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
snake_case__ = self.type_sequence_label_size
snake_case__ = TFEfficientFormerForImageClassification(lowerCamelCase )
snake_case__ = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case__ = 1
snake_case__ = TFEfficientFormerForImageClassification(lowerCamelCase )
snake_case__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A_ ( self ):
snake_case__ = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ = config_and_inputs
snake_case__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
_A : str = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
_A : List[str] = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_A : Optional[Any] = False
_A : List[Any] = False
_A : Tuple = False
_A : List[Any] = False
_A : Any = False
def A_ ( self ):
snake_case__ = TFEfficientFormerModelTester(self )
snake_case__ = ConfigTester(
self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
def A_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def A_ ( self ):
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def A_ ( self ):
pass
def A_ ( self ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(lowerCamelCase )
snake_case__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ = [*signature.parameters.keys()]
snake_case__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def A_ ( self ):
def check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
snake_case__ = model_class(lowerCamelCase )
snake_case__ = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase )
snake_case__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case__ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
if hasattr(self.model_tester , "encoder_seq_length" ):
snake_case__ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
snake_case__ = seq_length * self.model_tester.chunk_length
else:
snake_case__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
snake_case__ = outputs.decoder_hidden_states
            self.assertIsInstance(lowerCamelCase , (list, tuple) )
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
snake_case__ = getattr(self.model_tester , "seq_length" , lowerCamelCase )
snake_case__ = getattr(self.model_tester , "decoder_seq_length" , lowerCamelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def A_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ):
snake_case__ = super()._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def A_ ( self ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def A_ ( self ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase )
def A_ ( self ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def A_ ( self ):
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ = TFEfficientFormerModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def A_ ( self ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = True
snake_case__ = getattr(self.model_tester , "seq_length" , lowerCamelCase )
snake_case__ = getattr(self.model_tester , "encoder_seq_length" , lowerCamelCase )
snake_case__ = getattr(self.model_tester , "key_length" , lowerCamelCase )
snake_case__ = getattr(self.model_tester , "chunk_length" , lowerCamelCase )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
snake_case__ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
snake_case__ = True
snake_case__ = False
snake_case__ = True
snake_case__ = model_class(lowerCamelCase )
snake_case__ = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase )
snake_case__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case__ = True
snake_case__ = model_class(lowerCamelCase )
snake_case__ = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase )
snake_case__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def A_ ( self ):
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
snake_case__ = model_class(lowerCamelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
snake_case__ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCamelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
snake_case__ = model(lowerCamelCase )
self.assertTrue(outputs_dict is not None )
def SCREAMING_SNAKE_CASE__ ( ):
snake_case__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def A_ ( self ):
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
if is_vision_available()
else None
)
@slow
def A_ ( self ):
snake_case__ = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
snake_case__ = self.default_image_processor
snake_case__ = prepare_img()
snake_case__ = image_processor(images=lowerCamelCase , return_tensors="tf" )
# forward pass
snake_case__ = model(**lowerCamelCase , training=lowerCamelCase )
# verify the logits
snake_case__ = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
snake_case__ = tf.constant([-0.0_5_5_5, 0.4_8_2_5, -0.0_8_5_2] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 ) )
@slow
def A_ ( self ):
snake_case__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"snap-research/efficientformer-l1-300" )
snake_case__ = self.default_image_processor
snake_case__ = prepare_img()
snake_case__ = image_processor(images=lowerCamelCase , return_tensors="tf" )
# forward pass
snake_case__ = model(**lowerCamelCase , training=lowerCamelCase )
# verify the logits
snake_case__ = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
snake_case__ = tf.constant([-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 ) )
| 276
| 1
|
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, 'r', encoding='utf-8') as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
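    # Note on the thresholds above: each (pair, min_bleu_score) row asserts a
    # floor rather than an exact score, since beam-search generation on a small
    # validation batch fluctuates slightly across hardware and library versions.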
| 719
|
"""simple docstring"""
def snake_case__ ( _snake_case : str ):
"""simple docstring"""
UpperCamelCase__ = 0
# if input_string is "aba" than new_input_string become "a|b|a"
UpperCamelCase__ = ""
UpperCamelCase__ = ""
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(_snake_case ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the start and end of the previous furthest-ending palindromic
# substring
UpperCamelCase__ , UpperCamelCase__ = 0, 0
# length[i] shows the length of palindromic substring with center i
UpperCamelCase__ = [1 for i in range(len(_snake_case ) )]
# for each character in new_string find corresponding palindromic string
UpperCamelCase__ = 0
for j in range(len(_snake_case ) ):
UpperCamelCase__ = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(_snake_case )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
UpperCamelCase__ = 2 * k - 1
# does this palindrome end after the previously explored end (that is, r)?
# if yes, update r to the last index of this palindrome
if j + k - 1 > r:
UpperCamelCase__ = j - k + 1 # noqa: E741
UpperCamelCase__ = j + k - 1
# update max_length and start position
if max_length < length[j]:
UpperCamelCase__ = length[j]
UpperCamelCase__ = j
# create that string
UpperCamelCase__ = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
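# Worked example (a sketch of Manacher's algorithm above, assuming the input
# "abababa"): the interleaved string becomes "a|b|a|b|a|b|a" (13 chars); the
# centre index j = 6 expands to k = 7, so length[6] = 2*7 - 1 = 13, the
# maximum; slicing around start = 6 and stripping the '|' separators returns
# "abababa" -- the whole input is its own longest palindromic substring.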
| 304
| 0
|
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def a_ ( ):
'''simple docstring'''
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
lowercase__ : List[str] = '__test_patch_submodule_mock__'
with patch_submodule(_test_patching , 'os.path.join' , _lowerCAmelCase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def a_ ( ):
'''simple docstring'''
assert _test_patching.open is open
lowercase__ : Optional[int] = '__test_patch_submodule_builtin_mock__'
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , 'open' , _lowerCAmelCase ):
assert _test_patching.open is mock
# check that everything is back to normal when the patch is over
assert _test_patching.open is open
def a_ ( ):
'''simple docstring'''
lowercase__ : List[Any] = '__test_patch_submodule_missing_mock__'
with patch_submodule(_test_patching , 'pandas.read_csv' , _lowerCAmelCase ):
pass
def a_ ( ):
'''simple docstring'''
lowercase__ : str = '__test_patch_submodule_missing_builtin_mock__'
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , 'len' , _lowerCAmelCase ) is None
with patch_submodule(_test_patching , 'len' , _lowerCAmelCase ):
assert _test_patching.len is mock
assert _test_patching.len is len
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[Any] = '__test_patch_submodule_start_and_stop_mock__'
lowercase__ : Dict = patch_submodule(_test_patching , 'open' , _lowerCAmelCase )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def a_ ( ):
'''simple docstring'''
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
lowercase__ : Optional[int] = '__test_patch_submodule_successive_join__'
lowercase__ : List[Any] = '__test_patch_submodule_successive_dirname__'
lowercase__ : str = '__test_patch_submodule_successive_rename__'
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , 'os.path.join' , _lowerCAmelCase ):
with patch_submodule(_test_patching , 'os.rename' , _lowerCAmelCase ):
with patch_submodule(_test_patching , 'os.path.dirname' , _lowerCAmelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , 'os.rename' , _lowerCAmelCase ):
with patch_submodule(_test_patching , 'os.path.join' , _lowerCAmelCase ):
with patch_submodule(_test_patching , 'os.path.dirname' , _lowerCAmelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def a_ ( ):
'''simple docstring'''
lowercase__ : Any = '__test_patch_submodule_doesnt_exist_mock__'
with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , _lowerCAmelCase ):
pass
with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , _lowerCAmelCase ):
pass
| 599
|
"""simple docstring"""
class UpperCAmelCase_ :
def __init__( self , a , a , a ) -> List[Any]:
lowercase__ : List[str] = name
lowercase__ : List[str] = value
lowercase__ : Tuple = weight
def __repr__( self ) -> Any:
return f"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def _UpperCAmelCase ( self ) -> Any:
return self.value
def _UpperCAmelCase ( self ) -> int:
return self.name
def _UpperCAmelCase ( self ) -> str:
return self.weight
def _UpperCAmelCase ( self ) -> int:
return self.value / self.weight
def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : Union[str, Any] = []
for i in range(len(_lowerCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : Optional[Any] = sorted(_lowerCAmelCase , key=_lowerCAmelCase , reverse=_lowerCAmelCase )
lowercase__ : int = []
lowercase__ , lowercase__ : Dict = 0.0, 0.0
for i in range(len(_lowerCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def a_ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
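# A minimal usage sketch (in the original source the class above is `Things`
# and the two helpers are `build_menu` and `greedy`; the items, values and
# weights below are made up for illustration):
#
#     menu = build_menu(["burger", "salad"], [80, 30], [40, 10])
#     taken, total_value = greedy(menu, 50.0, Things.get_value)
#     # sorts by value descending, then takes items while the running
#     # weight stays within max_cost -> both items fit, total_value == 110.0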
| 599
| 1
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __magic_name__ ( _UpperCamelCase ):
UpperCamelCase : int = ["image_processor", "tokenizer"]
UpperCamelCase : List[str] = "FlavaImageProcessor"
UpperCamelCase : List[Any] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , __magic_name__=None , __magic_name__=None , **__magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __magic_name__ , )
_lowerCAmelCase = kwargs.pop('feature_extractor' )
_lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__magic_name__ , __magic_name__ )
_lowerCAmelCase = self.image_processor
def __call__( self , __magic_name__ = None , __magic_name__ = None , __magic_name__ = True , __magic_name__ = False , __magic_name__ = False , __magic_name__ = None , __magic_name__ = 0 , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = False , __magic_name__ = False , __magic_name__ = False , __magic_name__ = False , __magic_name__ = True , __magic_name__ = None , **__magic_name__ , ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_lowerCAmelCase = self.tokenizer(
text=__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , stride=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_token_type_ids=__magic_name__ , return_attention_mask=__magic_name__ , return_overflowing_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , return_offsets_mapping=__magic_name__ , return_length=__magic_name__ , verbose=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
if images is not None:
_lowerCAmelCase = self.image_processor(
__magic_name__ , return_image_mask=__magic_name__ , return_codebook_pixels=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
if text is not None and images is not None:
encoding.update(__magic_name__ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__magic_name__ ) , tensor_type=__magic_name__ )
def _lowerCamelCase ( self , *__magic_name__ , **__magic_name__ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def _lowerCamelCase ( self , *__magic_name__ , **__magic_name__ ):
"""simple docstring"""
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.tokenizer.model_input_names
_lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __magic_name__ , )
return self.image_processor_class
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __magic_name__ , )
return self.image_processor
| 705
|
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __magic_name__ :
def __init__( self , __magic_name__ , __magic_name__=1_3 , __magic_name__=3_0 , __magic_name__=2 , __magic_name__=3 , __magic_name__=True , __magic_name__=True , __magic_name__=3_2 , __magic_name__=5 , __magic_name__=4 , __magic_name__=3_7 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1_0 , __magic_name__=0.02 , __magic_name__=3 , __magic_name__=0.6 , __magic_name__=None , ):
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = mask_ratio
_lowerCAmelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCAmelCase = (image_size // patch_size) ** 2
_lowerCAmelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
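# Worked example with the defaults above (image_size=30, patch_size=2,
# mask_ratio=0.6): num_patches = (30 // 2) ** 2 = 225, and the encoder
# sequence length is ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91 tokens.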
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ):
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = ViTMAEModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_lowerCAmelCase = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = ViTMAEForPreTraining(__magic_name__ )
model.to(__magic_name__ )
model.eval()
_lowerCAmelCase = model(__magic_name__ )
_lowerCAmelCase = (self.image_size // self.patch_size) ** 2
_lowerCAmelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCAmelCase = 1
_lowerCAmelCase = ViTMAEForPreTraining(__magic_name__ )
model.to(__magic_name__ )
model.eval()
_lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase = model(__magic_name__ )
_lowerCAmelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
UpperCamelCase : Dict = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
UpperCamelCase : Dict = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
UpperCamelCase : int = False
UpperCamelCase : Optional[int] = False
UpperCamelCase : Any = False
UpperCamelCase : Optional[Any] = False
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = ViTMAEModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=3_7 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(__magic_name__ )
_lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__magic_name__ )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
np.random.seed(2 )
_lowerCAmelCase = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
_lowerCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCAmelCase = torch.from_numpy(__magic_name__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCAmelCase = pt_noise
super().check_pt_tf_models(__magic_name__ , __magic_name__ , __magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
_lowerCAmelCase = outputs[0].cpu().numpy()
_lowerCAmelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__magic_name__ )
_lowerCAmelCase = model_class.from_pretrained(__magic_name__ )
model.to(__magic_name__ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
# Make sure we don't have nans
_lowerCAmelCase = after_outputs[0].cpu().numpy()
_lowerCAmelCase = 0
_lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__magic_name__ , 1e-5 )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = ViTMAEModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def A__ ( ):
"""simple docstring"""
_lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
np.random.seed(2 )
_lowerCAmelCase = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(__magic_name__ )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=__magic_name__ , return_tensors='pt' ).to(__magic_name__ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCAmelCase = ViTMAEConfig()
_lowerCAmelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCAmelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
_lowerCAmelCase = model(**__magic_name__ , noise=torch.from_numpy(__magic_name__ ).to(device=__magic_name__ ) )
# verify the logits
_lowerCAmelCase = torch.Size((1, 1_9_6, 7_6_8) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
_lowerCAmelCase = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__magic_name__ ) , atol=1e-4 ) )
| 309
| 0
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
A_ : str = logging.get_logger(__name__)
A_ : int = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
a : int ='''umt5'''
a : Optional[Any] =['''past_key_values''']
def __init__( self , _lowerCamelCase=2_5_0_1_1_2 , _lowerCamelCase=5_1_2 , _lowerCamelCase=6_4 , _lowerCamelCase=1_0_2_4 , _lowerCamelCase=8 , _lowerCamelCase=None , _lowerCamelCase=6 , _lowerCamelCase=3_2 , _lowerCamelCase=1_2_8 , _lowerCamelCase=0.1 , _lowerCamelCase=1e-6 , _lowerCamelCase=1.0 , _lowerCamelCase="gated-gelu" , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="T5Tokenizer" , _lowerCamelCase=True , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=0 , **_lowerCamelCase , ):
super().__init__(
is_encoder_decoder=_lowerCamelCase , tokenizer_class=_lowerCamelCase , tie_word_embeddings=_lowerCamelCase , pad_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , **_lowerCamelCase , )
UpperCamelCase_: str = vocab_size
UpperCamelCase_: Any = d_model
UpperCamelCase_: Any = d_kv
UpperCamelCase_: Optional[Any] = d_ff
UpperCamelCase_: str = num_layers
UpperCamelCase_: Tuple = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
UpperCamelCase_: Optional[Any] = num_heads
UpperCamelCase_: List[str] = relative_attention_num_buckets
UpperCamelCase_: Union[str, Any] = relative_attention_max_distance
UpperCamelCase_: List[str] = dropout_rate
UpperCamelCase_: str = layer_norm_epsilon
UpperCamelCase_: Dict = initializer_factor
UpperCamelCase_: Optional[int] = feed_forward_proj
UpperCamelCase_: List[Any] = use_cache
UpperCamelCase_: Dict = self.feed_forward_proj.split('-' )
UpperCamelCase_: List[str] = act_info[-1]
UpperCamelCase_: str = act_info[0] == 'gated'
if len(_lowerCamelCase ) > 1 and act_info[0] != "gated" or len(_lowerCamelCase ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
UpperCamelCase_: int = 'gelu_new'
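# e.g. feed_forward_proj="gated-gelu" -> act_info == ["gated", "gelu"],
# so the dense activation is "gelu" and the gated flag is True; the branch
# above then remaps the activation to "gelu_new" for the gated-gelu case.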
@property
def _a ( self ):
return self.d_model
@property
def _a ( self ):
return self.num_heads
@property
def _a ( self ):
return self.num_layers
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def _a ( self ):
UpperCamelCase_: Dict = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
UpperCamelCase_: Tuple = 'past_encoder_sequence + sequence'
UpperCamelCase_: Any = {0: 'batch'}
UpperCamelCase_: Optional[int] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
UpperCamelCase_: Tuple = {0: 'batch', 1: 'decoder_sequence'}
UpperCamelCase_: Any = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_lowerCamelCase , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def _a ( self ):
return 1_3
@property
def _a ( self ):
return 5e-4
| 57
|
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowerCAmelCase__ :Dict = logging.get_logger(__name__)
lowerCAmelCase__ :Optional[int] = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __a ( UpperCAmelCase ):
_a : Tuple = 'perceiver'
def __init__( self , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=1280 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=26 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="kv" , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-1_2 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=262 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=56 , _SCREAMING_SNAKE_CASE=[368, 496] , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=1920 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 16, 224, 224] , **_SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = num_latents
_UpperCAmelCase = d_latents
_UpperCAmelCase = d_model
_UpperCAmelCase = num_blocks
_UpperCAmelCase = num_self_attends_per_block
_UpperCAmelCase = num_self_attention_heads
_UpperCAmelCase = num_cross_attention_heads
_UpperCAmelCase = qk_channels
_UpperCAmelCase = v_channels
_UpperCAmelCase = cross_attention_shape_for_attention
_UpperCAmelCase = self_attention_widening_factor
_UpperCAmelCase = cross_attention_widening_factor
_UpperCAmelCase = hidden_act
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = use_query_residual
# masked language modeling attributes
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
# image classification attributes
_UpperCAmelCase = image_size
# flow attributes
_UpperCAmelCase = train_size
# multimodal autoencoding attributes
_UpperCAmelCase = num_frames
_UpperCAmelCase = audio_samples_per_frame
_UpperCAmelCase = samples_per_patch
_UpperCAmelCase = output_shape
class __a ( UpperCAmelCase ):
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
_UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCAmelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('inputs', dynamic_axis),
('attention_mask', dynamic_axis),
] )
@property
def UpperCAmelCase__ ( self ) -> float:
"""simple docstring"""
return 1e-4
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 3 , _SCREAMING_SNAKE_CASE = 40 , _SCREAMING_SNAKE_CASE = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_UpperCAmelCase = compute_effective_axis_dimension(
_SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_UpperCAmelCase = preprocessor.num_special_tokens_to_add(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = compute_effective_axis_dimension(
_SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_SCREAMING_SNAKE_CASE )
# Generate dummy inputs according to compute batch and sequence
_UpperCAmelCase = [' '.join(['a'] ) * seq_length] * batch_size
_UpperCAmelCase = dict(preprocessor(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = inputs.pop('input_ids' )
return inputs
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_UpperCAmelCase = compute_effective_axis_dimension(_SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch )
_UpperCAmelCase = self._generate_dummy_images(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = dict(preprocessor(images=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = inputs.pop('pixel_values' )
return inputs
else:
raise ValueError(
'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
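# Behaviour sketch for the text branch above (the fixed sizes come from
# OnnxConfig defaults: batch 2, sequence 8): dynamic axes (-1) are replaced
# by those fixed values, the sequence length is reduced by the tokenizer's
# special tokens, and in the original source the popped "input_ids" tensor
# is re-inserted under the Perceiver's "inputs" key before returning.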
| 618
| 0
|
import math
def a_ ( lowerCamelCase : int ):
lowerCAmelCase = 0
lowerCAmelCase = 0
while num > 0:
lowerCAmelCase = num % 8
lowerCAmelCase = octal + (remainder * math.floor(math.pow(10 , lowerCamelCase ) ))
counter += 1
lowerCAmelCase = math.floor(num / 8 ) # i.e. num //= 8 (floor division, discarding the remainder)
# This formatting removes trailing '.0' from `octal`.
return f'''0o{int(lowerCamelCase )}'''
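# Worked trace for num=65: 65 % 8 = 1 -> octal = 1, num = 8;
# 8 % 8 = 0 -> octal stays 1, num = 1; 1 % 8 = 1 -> octal = 1 + 1*10**2 = 101,
# num = 0; the function returns "0o101" (65 decimal == 0o101 octal).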
def a_ ( ):
print('\n2 in octal is:' )
print(decimal_to_octal(2 ) ) # = 2
print('\n8 in octal is:' )
print(decimal_to_octal(8 ) ) # = 10
print('\n65 in octal is:' )
print(decimal_to_octal(65 ) ) # = 101
print('\n216 in octal is:' )
print(decimal_to_octal(216 ) ) # = 330
print('\n512 in octal is:' )
print(decimal_to_octal(512 ) ) # = 1000
print('\n' )
if __name__ == "__main__":
main()
| 705
|
'''simple docstring'''
from __future__ import annotations
def a_ ( lowerCamelCase : list , lowerCamelCase : int ):
# Base case: a collection with one element (or fewer) is already sorted
if len(lowerCamelCase ) <= 1 or n <= 1:
return
insert_next(lowerCamelCase , n - 1 )
rec_insertion_sort(lowerCamelCase , n - 1 )
def a_ ( lowerCamelCase : list , lowerCamelCase : int ):
# Checks order between adjacent elements
if index >= len(lowerCamelCase ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
lowerCAmelCase , lowerCAmelCase = (
collection[index],
collection[index - 1],
)
insert_next(lowerCamelCase , index + 1 )
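# Worked trace for rec_insertion_sort([3, 1, 2], 3):
#   insert_next(.., 2): 1 <= 2, nothing to do
#   insert_next(.., 1): 3 > 1 -> swap -> [1, 3, 2]; then 3 > 2 -> swap -> [1, 2, 3]
#   the remaining recursive call hits the n <= 1 base case, leaving [1, 2, 3]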
if __name__ == "__main__":
__snake_case =input("""Enter integers separated by spaces: """)
__snake_case =[int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 513
| 0
|
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def UpperCamelCase ( lowercase_ = "isbn/0140328726" ) -> dict:
'''simple docstring'''
lowercase__ : Dict = olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("""/""" ) != 1:
lowercase__ : Optional[Any] = F'{olid} is not a valid Open Library olid'
raise ValueError(lowercase_ )
return requests.get(F'https://openlibrary.org/{new_olid}.json' ).json()
def UpperCamelCase ( lowercase_ ) -> dict:
'''simple docstring'''
lowercase__ : Tuple = {
"""title""": """Title""",
"""publish_date""": """Publish date""",
"""authors""": """Authors""",
"""number_of_pages""": """Number of pages:""",
"""first_sentence""": """First sentence""",
"""isbn_10""": """ISBN (10)""",
"""isbn_13""": """ISBN (13)""",
}
lowercase__ : List[Any] = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
lowercase__ : Any = [
get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""]
]
lowercase__ : Tuple = data["""First sentence"""]["""value"""]
for key, value in data.items():
if isinstance(lowercase_ , lowercase_ ):
lowercase__ : List[str] = """, """.join(lowercase_ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
lowerCamelCase__ : Tuple = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (1_0, 1_3) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
lowerCamelCase__ : Optional[Any] = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print("""\n""".join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 12
|
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''KEY''')
lowerCAmelCase__ = TypeVar('''VAL''')
@dataclass(frozen=lowerCamelCase__ , slots=lowerCamelCase__ )
class lowercase_ (Generic[KEY, VAL] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : KEY
SCREAMING_SNAKE_CASE : VAL
class lowercase_ (_Item ):
"""simple docstring"""
def __init__( self : Optional[int] ):
super().__init__(lowercase__ ,lowercase__ )
def __bool__( self : List[str] ):
return False
lowerCAmelCase__ = _DeletedItem()
class lowercase_ (MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self : Dict ,lowercase__ : int = 8 ,lowercase__ : float = 0.7_5 ):
__lowercase = initial_block_size
__lowercase = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__lowercase = capacity_factor
__lowercase = 0
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : KEY ):
return hash(lowercase__ ) % len(self._buckets )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : int ):
return (ind + 1) % len(self._buckets )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : int ,lowercase__ : KEY ,lowercase__ : VAL ):
__lowercase = self._buckets[ind]
if not stored:
__lowercase = _Item(lowercase__ ,lowercase__ )
self._len += 1
return True
elif stored.key == key:
__lowercase = _Item(lowercase__ ,lowercase__ )
return True
else:
return False
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
if len(self._buckets ) <= self._initial_block_size:
return False
__lowercase = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ):
__lowercase = self._buckets
__lowercase = [None] * new_size
__lowercase = 0
for item in old_buckets:
if item:
self._add_item(item.key ,item.val )
def SCREAMING_SNAKE_CASE ( self : str ):
self._resize(len(self._buckets ) * 2 )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
self._resize(len(self._buckets ) // 2 )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : KEY ):
__lowercase = self._get_bucket_index(lowercase__ )
for _ in range(len(self._buckets ) ):
yield ind
__lowercase = self._get_next_ind(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : KEY ,lowercase__ : VAL ):
for ind in self._iterate_buckets(lowercase__ ):
if self._try_set(lowercase__ ,lowercase__ ,lowercase__ ):
break
def __setitem__( self : str ,lowercase__ : KEY ,lowercase__ : VAL ):
if self._is_full():
self._size_up()
self._add_item(lowercase__ ,lowercase__ )
def __delitem__( self : Tuple ,lowercase__ : KEY ):
for ind in self._iterate_buckets(lowercase__ ):
__lowercase = self._buckets[ind]
if item is None:
raise KeyError(lowercase__ )
if item is _deleted:
continue
if item.key == key:
__lowercase = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Tuple ,lowercase__ : KEY ):
for ind in self._iterate_buckets(lowercase__ ):
__lowercase = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowercase__ )
def __len__( self : Optional[int] ):
return self._len
def __iter__( self : str ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : Optional[Any] ):
__lowercase = ''' ,'''.join(
F"{item.key}: {item.val}" for item in self._buckets if item )
return F"HashMap({val_string})"
| 41
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 340
|
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __a ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any] ):
# Construct model
if gpta_config_file == "":
a__ : str = GPTaConfig()
else:
a__ : List[str] = GPTaConfig.from_json_file(lowerCAmelCase__ )
a__ : Optional[int] = GPTaModel(lowerCAmelCase__ )
# Load weights from numpy
load_tf_weights_in_gpta(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
a__ : Union[str, Any] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
a__ : Optional[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(model.state_dict() , lowerCAmelCase__ )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
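# Example invocation (the script name and file paths are illustrative
# placeholders, not taken from this file):
#
#     python convert_gpt2_checkpoint.py \
#         --gpt2_checkpoint_path ./gpt2/model.ckpt \
#         --pytorch_dump_folder_path ./gpt2-pytorch \
#         --gpt2_config_file ./gpt2/config.json
#
# Omitting --gpt2_config_file falls back to a default GPTaConfig() (see the
# empty-string branch above).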
| 340
| 1
|
'''simple docstring'''
from __future__ import annotations
UpperCamelCase : List[Any] = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = graph
# mapping node to its parent in resulting breadth first tree
lowerCamelCase__ = {}
lowerCamelCase__ = source_vertex
def UpperCamelCase_ ( self ):
lowerCamelCase__ = {self.source_vertex}
lowerCamelCase__ = None
lowerCamelCase__ = [self.source_vertex] # first in first out queue
while queue:
lowerCamelCase__ = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(_lowerCAmelCase )
lowerCamelCase__ = vertex
queue.append(_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
if target_vertex == self.source_vertex:
return self.source_vertex
lowerCamelCase__ = self.parent.get(_lowerCAmelCase )
if target_vertex_parent is None:
lowerCamelCase__ = (
F'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
)
raise ValueError(_lowerCAmelCase )
return self.shortest_path(_lowerCAmelCase ) + F'''->{target_vertex}'''
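# e.g. with the adjacency list above and source vertex "A", the BFS parent
# map records parent["B"] == "A" and parent["D"] == "B", so
# shortest_path("D") unwinds recursively to the string "A->B->D".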
if __name__ == "__main__":
UpperCamelCase : List[Any] = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
| 50
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase : Union[str, Any] = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Any = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowercase : Optional[int] = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] = ['''ChineseCLIPFeatureExtractor''']
__lowercase : int = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Dict = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__lowercase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 706
|
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__lowercase : Dict = logging.getLogger(__name__)
@dataclass
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[float] = field(
default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} )
__lowerCamelCase : bool = field(default=snake_case , metadata={'''help''': '''Whether to SortishSamler or not.'''} )
__lowerCamelCase : bool = field(
default=snake_case , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
__lowerCamelCase : bool = field(default=snake_case , metadata={'''help''': '''whether to use adafactor'''} )
__lowerCamelCase : Optional[float] = field(
default=snake_case , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} )
__lowerCamelCase : Optional[float] = field(
default=snake_case , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} )
__lowerCamelCase : Optional[float] = field(default=snake_case , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} )
__lowerCamelCase : Optional[float] = field(
default=snake_case , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} )
__lowerCamelCase : Optional[str] = field(
default='''linear''' , metadata={'''help''': f"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
| 315
| 0
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {"""vocab_file""": """vocab.txt"""}
lowerCamelCase = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
lowerCamelCase = {
"""openbmb/cpm-ant-10b""": 10_24,
}
def SCREAMING_SNAKE_CASE( __UpperCamelCase ) -> int:
a__ : List[str] = collections.OrderedDict()
with open(__UpperCamelCase , "r" , encoding="utf-8" ) as reader:
a__ : Optional[Any] = reader.readlines()
for index, token in enumerate(__UpperCamelCase ):
a__ : List[str] = token.rstrip("\n" )
a__ : Union[str, Any] = index
return vocab
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase="<unk>" , __UpperCAmelCase=200 ):
"""simple docstring"""
a__ : str = vocab
a__ : Union[str, Any] = unk_token
a__ : Optional[Any] = max_input_chars_per_word
def _A ( self , __UpperCAmelCase ):
"""simple docstring"""
a__ : Union[str, Any] = list(__UpperCAmelCase )
if len(__UpperCAmelCase ) > self.max_input_chars_per_word:
return [self.unk_token]
a__ : Optional[Any] = 0
a__ : Tuple = []
while start < len(__UpperCAmelCase ):
a__ : Dict = len(__UpperCAmelCase )
a__ : Union[str, Any] = None
while start < end:
a__ : List[Any] = "".join(chars[start:end] )
if substr in self.vocab:
a__ : List[str] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(__UpperCAmelCase )
a__ : List[Any] = end
return sub_tokens
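# Greedy longest-match example (hypothetical vocab): with vocab
# {"ab": 0, "c": 1}, tokenizing "abc" first tries "abc" (miss), then
# "ab" (hit), then "c" (hit) -> ["ab", "c"]; any word longer than
# max_input_chars_per_word collapses to [self.unk_token].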
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A :Tuple = VOCAB_FILES_NAMES
A :Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
A :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A :List[str] = ["input_ids", "attention_mask"]
A :Optional[Any] = False
def __init__( self , __UpperCAmelCase , __UpperCAmelCase="<d>" , __UpperCAmelCase="</d>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="</n>" , __UpperCAmelCase="</_>" , __UpperCAmelCase="left" , **__UpperCAmelCase , ):
"""simple docstring"""
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=__UpperCAmelCase , eod_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , line_token=__UpperCAmelCase , space_token=__UpperCAmelCase , padding_side=__UpperCAmelCase , **__UpperCAmelCase , )
a__ : Any = bod_token
a__ : List[str] = eod_token
a__ : Optional[int] = load_vocab(__UpperCAmelCase )
a__ : Dict = self.encoder[space_token]
a__ : Dict = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
a__ : int = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __UpperCAmelCase : x[1] ) )
a__ : Optional[int] = {v: k for k, v in self.encoder.items()}
a__ : Optional[int] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def _A ( self ):
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def _A ( self ):
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def _A ( self ):
"""simple docstring"""
return self.encoder["\n"]
@property
def _A ( self ):
"""simple docstring"""
return len(self.encoder )
def _A ( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _A ( self , __UpperCAmelCase ):
"""simple docstring"""
a__ : Optional[int] = []
for x in jieba.cut(__UpperCAmelCase , cut_all=__UpperCAmelCase ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__UpperCAmelCase ) )
return output_tokens
def _A ( self , __UpperCAmelCase , **__UpperCAmelCase ):
"""simple docstring"""
a__ : int = [i for i in token_ids if i >= 0]
a__ : Optional[int] = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(__UpperCAmelCase , **__UpperCAmelCase )
def _A ( self , __UpperCAmelCase ):
"""simple docstring"""
return token in self.encoder
def _A ( self , __UpperCAmelCase ):
"""simple docstring"""
return "".join(__UpperCAmelCase )
def _A ( self , __UpperCAmelCase ):
"""simple docstring"""
return self.encoder.get(__UpperCAmelCase , self.encoder.get(self.unk_token ) )
def _A ( self , __UpperCAmelCase ):
"""simple docstring"""
return self.decoder.get(__UpperCAmelCase , self.unk_token )
def _A ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
"""simple docstring"""
if os.path.isdir(__UpperCAmelCase ):
a__ : Tuple = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
a__ : Union[str, Any] = (filename_prefix + "-" if filename_prefix else "") + save_directory
a__ : Union[str, Any] = 0
if " " in self.encoder:
a__ : str = self.encoder[" "]
del self.encoder[" "]
if "\n" in self.encoder:
a__ : Optional[Any] = self.encoder["\n"]
del self.encoder["\n"]
a__ : List[Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __UpperCAmelCase : x[1] ) )
with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
" Please check that the vocabulary is not corrupted!" )
a__ : str = token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def _A ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def _A ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase ))
return [1] + ([0] * len(__UpperCAmelCase ))
| 191
|
from __future__ import annotations
import queue
class TreeNode:
    def __init__( self , data ):
        self.data = data
        self.right = None
        self.left = None
def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n" )
    check = input("Enter the value of the root node: " ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = f'Enter the left node of {node_found.data}: '
        check = input(msg ).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = f'Enter the right node of {node_found.data}: '
        check = input(msg ).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise RuntimeError("unreachable: the loop above always returns" )
def pre_order(node ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    print(node.data , end="," )
    pre_order(node.left )
    pre_order(node.right )
def in_order(node ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    in_order(node.left )
    print(node.data , end="," )
    in_order(node.right )
def post_order(node ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data , end="," )
def level_order(node ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end="," )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def level_order_actual(node ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end="," )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )
def pre_order_iter(node ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end="," )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end="," )
        n = n.right
def post_order_iter(node ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    stacka: list[TreeNode] = []
    stackb: list[TreeNode] = []
    n = node
    stacka.append(n )
    while stacka:  # to find the reversed order of post order, store it in stackb
        n = stacka.pop()
        if n.left:
            stacka.append(n.left )
        if n.right:
            stacka.append(n.right )
        stackb.append(n )
    while stackb:  # pop up from stackb will be the post order
        print(stackb.pop().data , end="," )
def prompt(s = "" , width=50 , char="*" ) -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s ) - 2 , 2 )
    return f'{left * char} {s} {(left + extra) * char}'
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
lowerCamelCase = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 50 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
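# A non-interactive way to exercise the traversals above: build the complete
# tree 1..7 by hand instead of calling the interactive build_tree().
#
#     root = TreeNode(1)
#     root.left, root.right = TreeNode(2), TreeNode(3)
#     root.left.left, root.left.right = TreeNode(4), TreeNode(5)
#     root.right.left, root.right.right = TreeNode(6), TreeNode(7)
#     pre_order(root)    # 1,2,4,5,3,6,7,
#     in_order(root)     # 4,2,5,1,6,3,7,
#     post_order(root)   # 4,5,2,6,7,3,1,
#     level_order(root)  # 1,2,3,4,5,6,7,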
| 191
| 1
|
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler : Optional[logging.Handler] = None
log_levels = {
    """debug""": logging.DEBUG,
    """info""": logging.INFO,
    """warning""": logging.WARNING,
    """error""": logging.ERROR,
    """critical""": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level () -> int:
    """simple docstring"""
    env_level_str = os.getenv('''TRANSFORMERS_VERBOSITY''' , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '
                f'has to be one of: { ", ".join(log_levels.keys() ) }' )
    return _default_log_level
def _get_library_name () -> str:
    """simple docstring"""
    return __name__.split('''.''' )[0]
def _get_library_root_logger () -> logging.Logger:
    """simple docstring"""
    return logging.getLogger(_get_library_name() )
def _configure_library_root_logger () -> None:
    """simple docstring"""
"""simple docstring"""
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        library_root_logger.propagate = False
def _reset_library_root_logger () -> None:
    """simple docstring"""
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        _default_handler = None
def get_log_levels_dict () -> dict:
    """simple docstring"""
    return log_levels
def get_logger (name = None ) -> logging.Logger:
    """simple docstring"""
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name )
def get_verbosity () -> int:
    """simple docstring"""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity (verbosity ) -> None:
    """simple docstring"""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity )
def set_verbosity_info () -> None:
    """simple docstring"""
    return set_verbosity(INFO )
def set_verbosity_warning () -> None:
    """simple docstring"""
    return set_verbosity(WARNING )
def set_verbosity_debug () -> None:
    """simple docstring"""
    return set_verbosity(DEBUG )
def set_verbosity_error () -> None:
    """simple docstring"""
    return set_verbosity(ERROR )
def disable_default_handler () -> None:
    """simple docstring"""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler )
def enable_default_handler () -> None:
    """simple docstring"""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler )
def add_handler (handler ) -> None:
    """simple docstring"""
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler )
def remove_handler (handler ) -> None:
    """simple docstring"""
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler )
def disable_propagation () -> None:
    """simple docstring"""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation () -> None:
    """simple docstring"""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format () -> None:
    """simple docstring"""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter('''[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s''' )
        handler.setFormatter(formatter )
def reset_format () -> None:
    """simple docstring"""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None )
def warning_advice (self , *args , **kwargs ) -> None:
    """simple docstring"""
    no_advisory_warnings = os.getenv('''TRANSFORMERS_NO_ADVISORY_WARNINGS''' , False )
    if no_advisory_warnings:
        return
    self.warning(*args , **kwargs )
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None )
def warning_once (self , *args , **kwargs ) -> None:
    """simple docstring"""
    self.warning(*args , **kwargs )
logging.Logger.warning_once = warning_once
class EmptyTqdm :
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):  # pylint: disable=unused-argument
        '''simple docstring'''
        self._iterator = args[0] if args else None
def __iter__( self : int):
'''simple docstring'''
return iter(self._iterator)
    def __getattr__( self , name ):
        '''simple docstring'''
        def empty_fn(*args , **kwargs ):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__( self ):
        '''simple docstring'''
        return self
    def __exit__( self , type_ , value , traceback ):
        '''simple docstring'''
        return
class _tqdm_cls :
    '''simple docstring'''
    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        if _tqdm_active:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def set_lock ( self , *args , **kwargs ):
        '''simple docstring'''
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock ( self ):
        '''simple docstring'''
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def _A () -> bool:
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def _A () -> Tuple:
"""simple docstring"""
global _tqdm_active
SCREAMING_SNAKE_CASE_ : int = True
hf_hub_utils.enable_progress_bars()
def _A () -> int:
"""simple docstring"""
global _tqdm_active
SCREAMING_SNAKE_CASE_ : Any = False
hf_hub_utils.disable_progress_bars()
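# A minimal usage sketch of the verbosity API defined above (function names as
# restored; get_logger returns a child of whatever _get_library_name() names):
#
#     from transformers.utils import logging
#
#     logging.set_verbosity_info()           # root library logger now at INFO
#     logger = logging.get_logger(__name__)  # child logger inherits the level
#     logger.info("loading weights...")      # printed to sys.stderr
#     logging.disable_progress_bar()         # swaps tqdm for the EmptyTqdm stub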
| 714
|
"""simple docstring"""
# Algorithm for the pigeonhole sorting
def pigeonhole_sort (a ) -> None:
    """simple docstring"""
    min_val = min(a )  # min() finds the minimum value
    max_val = max(a )  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x , int ), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size ):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def main () -> None:
    """simple docstring"""
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a )
    print('''Sorted order is:''' , ''' '''.join(map(str , a ) ) )
if __name__ == "__main__":
    main()
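# A quick sanity check for pigeonhole_sort above. The sort is O(n + range),
# so it only pays off when max(a) - min(a) stays small relative to len(a).
sample = [5, -1, 3, 3, 0]
pigeonhole_sort(sample)
assert sample == [-1, 0, 3, 3, 5]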
| 176
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __magic_name__ ( BaseImageProcessor):
    model_input_names = ['pixel_values']
    def __init__( self ,do_resize : bool = True ,size : Dict[str, int] = None ,resample : PILImageResampling = PILImageResampling.BICUBIC ,do_center_crop : bool = True ,crop_size : Dict[str, int] = None ,do_rescale : bool = True ,rescale_factor : Union[int, float] = 1 / 255 ,do_normalize : bool = True ,image_mean : Optional[Union[float, List[float]]] = None ,image_std : Optional[Union[float, List[float]]] = None ,do_convert_rgb : bool = True ,**kwargs ,):
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size ,default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size ,default_to_square=True ,param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize ( self ,image : np.ndarray ,size : Dict[str, int] ,resample : PILImageResampling = PILImageResampling.BICUBIC ,data_format : Optional[Union[str, ChannelDimension]] = None ,**kwargs ,):
        size = get_size_dict(size ,default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        output_size = get_resize_output_image_size(image ,size=size["shortest_edge"] ,default_to_square=False )
        return resize(image ,size=output_size ,resample=resample ,data_format=data_format ,**kwargs )
    def center_crop ( self ,image : np.ndarray ,size : Dict[str, int] ,data_format : Optional[Union[str, ChannelDimension]] = None ,**kwargs ,):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(image ,size=(size["height"], size["width"]) ,data_format=data_format ,**kwargs )
    def rescale ( self ,image : np.ndarray ,scale : Union[int, float] ,data_format : Optional[Union[str, ChannelDimension]] = None ,**kwargs ,):
        return rescale(image ,scale=scale ,data_format=data_format ,**kwargs )
    def normalize ( self ,image : np.ndarray ,mean : Union[float, List[float]] ,std : Union[float, List[float]] ,data_format : Optional[Union[str, ChannelDimension]] = None ,**kwargs ,):
        return normalize(image ,mean=mean ,std=std ,data_format=data_format ,**kwargs )
    def preprocess ( self ,images : ImageInput ,do_resize : bool = None ,size : Dict[str, int] = None ,resample : PILImageResampling = None ,do_center_crop : bool = None ,crop_size : int = None ,do_rescale : bool = None ,rescale_factor : float = None ,do_normalize : bool = None ,image_mean : Optional[Union[float, List[float]]] = None ,image_std : Optional[Union[float, List[float]]] = None ,do_convert_rgb : bool = None ,return_tensors : Optional[Union[str, TensorType]] = None ,data_format : Optional[ChannelDimension] = ChannelDimension.FIRST ,**kwargs ,):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size ,param_name="size" ,default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size ,param_name="crop_size" ,default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image ,size=size ,resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image ,size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image ,scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image ,mean=image_mean ,std=image_std ) for image in images]
        images = [to_channel_dimension_format(image ,data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data ,tensor_type=return_tensors )
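# A standalone numpy sketch of the rescale -> normalize arithmetic the
# processor above applies per pixel (constants are illustrative; the real
# implementation delegates to the image_transforms helpers imported at top).
def _preprocess_sketch(image , mean=0.5 , std=0.5 , scale=1 / 255):
    # image: (H, W, C) uint8 array -> float pixel values in [-1, 1]
    rescaled = image.astype(np.float32) * scale
    return (rescaled - mean) / std

_demo = np.ones((4, 4, 3), dtype=np.uint8) * 255
assert np.allclose(_preprocess_sketch(_demo), 1.0)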
| 333
|
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester ( unittest.TestCase):
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_attention_mask=True ,use_token_type_ids=True ,use_labels=True ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_choices=4 ,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs ( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=False ,initializer_range=self.initializer_range ,)
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest ( FlaxModelTesterMixin , unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp ( self ):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self )
    @slow
    def test_model_from_pretrained ( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" ,from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest ( unittest.TestCase):
    @slow
    def test_inference_masked_lm ( self ):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" ,from_pt=True )
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ,dtype=jnp.int32 )
        output = model(input_ids )[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape ) ,expected_shape )
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] ,dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] ,expected_slice ,atol=1e-4 ) )
    @slow
    def test_inference_no_head ( self ):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" ,from_pt=True )
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ,dtype=jnp.int32 )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] ,dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] ,expected_slice ,atol=1e-4 ) )
| 333
| 1
|
"""simple docstring"""
import operator
def strand_sort ( arr : list , reverse : bool = False , solution : list | None = None ):
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0 )]
    for i, item in enumerate(arr ):
        if _operator(item , sublist[-1] ):
            sublist.append(item )
            arr.pop(i )
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist )
    else:
        while sublist:
            item = sublist.pop(0 )
            for i, xx in enumerate(solution ):
                if not _operator(item , xx ):
                    solution.insert(i , item )
                    break
            else:
                solution.append(item )
    strand_sort(arr , reverse , solution )
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
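# One strand extraction traced by hand: from [4, 3, 5, 1, 2] the first sublist
# pulled is [4, 5] (each item greater than the sublist tail), leaving [3, 1, 2]
# for the recursive call; sublists are then merge-inserted into `solution`.
# A quick extra check with duplicates:
assert strand_sort([2, 2, 1, 3]) == [1, 2, 2, 3]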
| 500
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
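# The gist of the _LazyModule pattern used above, as a minimal standalone
# sketch (class and structure names here are hypothetical): attribute access
# triggers the real import, so importing the package itself stays cheap.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # maps attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)

# usage: LazyModuleSketch("pkg", {"json": ["dumps"]}).dumps({"a": 1})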
| 500
| 1
|
'''simple docstring'''
def max_product_subarray (numbers : list[int] ) -> int:
    '''simple docstring'''
    if not numbers:
        return 0
    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError('''numbers must be an iterable of integers''' )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now , min_till_now = min_till_now , max_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
    return max_prod
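# Worked example for max_product_subarray above: in [2, 3, -2, 4] the sign
# flip at -2 swaps the running max/min products, so the best product is
# 2 * 3 = 6, not any span crossing the negative value.
assert max_product_subarray([2, 3, -2, 4]) == 6
assert max_product_subarray([-2, 0, -1]) == 0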
| 22
|
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , ) -> Optional[Any]:
        """simple docstring"""
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
def __lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class A ( ImageProcessingSavingTestMixin ,unittest.TestCase ):
lowercase_ = ImageGPTImageProcessor if is_vision_available() else None
    def setUp ( self ) -> None:
        """simple docstring"""
        self.image_processor_tester = ImageGPTImageProcessingTester(self )
    @property
    def image_processor_dict ( self ) -> dict:
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties ( self ) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''clusters''' ) )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
    def test_image_processor_from_dict_with_kwargs ( self ) -> None:
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
    def test_to_json_string ( self ) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        obj = json.loads(image_processing.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , obj[key] ) )
            else:
                self.assertEqual(obj[key] , value )
    def test_to_json_file ( self ) -> None:
        """simple docstring"""
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , '''image_processor.json''' )
            image_processor_first.to_json_file(json_file_path )
            image_processor_second = self.image_processing_class.from_json_file(json_file_path ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , value )
    def test_save_load_pretrained ( self ) -> None:
        """simple docstring"""
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname )
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , value )
@unittest.skip('''ImageGPT requires clusters at initialization''' )
    def test_init_without_params ( self ) -> None:
"""simple docstring"""
pass
def prepare_images ():
    '''simple docstring'''
    dataset = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
    image1 = Image.open(dataset[4]['''file'''] )
    image2 = Image.open(dataset[5]['''file'''] )
    images = [image1, image2]
    return images
@require_vision
@require_torch
class A ( unittest.TestCase ):
    @slow
    def test_image ( self ) -> None:
        """simple docstring"""
        image_processing = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' )
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0] , return_tensors='''pt''' )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (1, 1024) )
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , expected_ids )
        # test batched
        encoding = image_processing(images , return_tensors='''pt''' )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (2, 1024) )
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , expected_ids )
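# ImageGPT's image processor maps each normalized pixel to its nearest color
# cluster and feeds the cluster indices as input_ids. A minimal numpy sketch
# of that nearest-cluster step (the three clusters here are made up; the real
# checkpoint ships 512 of them):
clusters = np.array([[-1.0, -1.0, -1.0], [0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
pixels = np.array([[0.9, 0.8, 1.0], [-0.7, -0.9, -1.0]])  # (n_pixels, 3)
# squared distance of every pixel to every cluster, then argmin per pixel
dists = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
input_ids = dists.argmin(axis=1)
assert input_ids.tolist() == [2, 0]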
| 22
| 1
|
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def UpperCamelCase_ ( __a = "laptop" ) -> DataFrame:
a__ : Any = f'''https://www.amazon.in/laptop/s?k={product}'''
a__ : List[Any] = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
a__ : Optional[Any] = BeautifulSoup(requests.get(__a , headers=__a ).text )
# Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span" , attrs={"class": "a-offscreen"} ).text
            try:
                product_rating = item.find("span" , attrs={"class": "a-icon-alt"} ).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," , "" ) )
                            - float(product_price.strip("₹" ).replace("," , "" ) )
                        )
                        / float(product_mrp.strip("₹" ).replace("," , "" ) )
                    )
                    * 100 )
            except ValueError:
                discount = float("nan" )
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index )] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"], "MRP of the product"
        ] = " "
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"], "Discount"
        ] = " "
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
UpperCamelCase : str = """headphones"""
get_amazon_product_data(product).to_csv(f"""Amazon Product Data for {product}.csv""")
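# The discount column above is plain percentage arithmetic; a standalone
# illustration with made-up prices:
mrp, price = 1999.0, 1499.0
discount = (mrp - price) / mrp * 100
assert round(discount, 2) == 25.01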
| 151
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
UpperCamelCase : Any = logging.get_logger(__name__)
class A__ ( FeatureExtractionMixin ):
    """simple docstring"""
    def __init__( self , feature_size : int , sampling_rate : int , padding_value : float , **kwargs ):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.padding_side = kwargs.pop("padding_side" , "right" )
        self.return_attention_mask = kwargs.pop("return_attention_mask" , True )
        super().__init__(**kwargs )
    def pad ( self , processed_features : Union[
                BatchFeature,
                List[BatchFeature],
                Dict[str, BatchFeature],
                Dict[str, List[BatchFeature]],
                List[Dict[str, BatchFeature]],
            ] , padding : Union[bool, str, PaddingStrategy] = True , max_length : Optional[int] = None , truncation : bool = False , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , return_tensors : Optional[Union[str, TensorType]] = None , ):
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }
        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f''' to this method that includes {self.model_input_names[0]}, but you provided'''
                f''' {list(processed_features.keys() )}''' )
        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )
        if len(required_input ) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features
        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(required_input ):
                first_element = required_input[index][0]
        if return_tensors is None:
            if is_tf_tensor(first_element ):
                return_tensors = "tf"
            elif is_torch_tensor(first_element ):
                return_tensors = "pt"
            elif isinstance(first_element , (int, float, list, tuple, np.ndarray) ):
                return_tensors = "np"
            else:
                raise ValueError(
                    f'''type of {first_element} unknown: {type(first_element )}. '''
                    "Should be one of a python, numpy, pytorch or tensorflow object." )
        for key, value in processed_features.items():
            if isinstance(value[0] , (int, float) ):
                processed_features[key] = to_numpy(value )
            else:
                processed_features[key] = [to_numpy(v ) for v in value]
        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding , max_length=max_length )
        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input )
        if not all(len(v ) == batch_size for v in processed_features.values() ):
            raise ValueError("Some items in the output dictionary have a different batch size than others." )
        truncated_inputs = []
        for i in range(batch_size ):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , truncation=truncation , )
            truncated_inputs.append(inputs_slice )
        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            padding_strategy = PaddingStrategy.MAX_LENGTH
        batch_outputs = {}
        for i in range(batch_size ):
            # padding
            outputs = self._pad(
                truncated_inputs[i] , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64 ):
                    value = value.astype(np.float32 )
                batch_outputs[key].append(value )
        return BatchFeature(batch_outputs , tensor_type=return_tensors )
    def _pad ( self , processed_features : Union[Dict[str, np.ndarray], BatchFeature] , max_length : Optional[int] = None , padding_strategy : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , ):
        required_input = processed_features[self.model_input_names[0]]
        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input )
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input ) < max_length
        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input ) , dtype=np.int32 )
        if needs_to_be_padded:
            difference = max_length - len(required_input )
            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"] , (0, difference) )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , "constant" , constant_values=self.padding_value )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"] , (difference, 0) )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , "constant" , constant_values=self.padding_value )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return processed_features
    def _truncate ( self , processed_features : Union[Dict[str, np.ndarray], BatchFeature] , max_length : Optional[int] = None , pad_to_multiple_of : Optional[int] = None , truncation : Optional[bool] = None , ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
        required_input = processed_features[self.model_input_names[0]]
        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_truncated = len(required_input ) > max_length
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]
        return processed_features
    def _get_padding_strategies ( self , padding=False , max_length=None ):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding , PaddingStrategy ):
                padding_strategy = PaddingStrategy(padding )
            elif isinstance(padding , PaddingStrategy ):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD
        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
        return padding_strategy
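# A tiny standalone illustration of the right-padding scheme in _pad above
# (numpy is already imported at the top of this file): pad a 1-D feature
# sequence to max_length and extend the attention mask to match.
seq = np.array([1.0, 2.0, 3.0], dtype=np.float32)
mask = np.ones(len(seq), dtype=np.int32)
max_len, pad_value = 5, 0.0
diff = max_len - len(seq)
padded = np.pad(seq, (0, diff), "constant", constant_values=pad_value)
mask = np.pad(mask, (0, diff))
assert padded.tolist() == [1.0, 2.0, 3.0, 0.0, 0.0]
assert mask.tolist() == [1, 1, 1, 0, 0]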
| 151
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class UpperCAmelCase (PretrainedConfig ):
    """simple docstring"""
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }
    def __init__( self , vocab_size=50265 , d_model=1024 , decoder_layers=12 , decoder_attention_heads=16 , decoder_ffn_dim=4096 , activation_function="gelu" , max_position_embeddings=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , decoder_start_token_id=2 , init_std=0.02 , decoder_layerdrop=0.0 , use_cache=True , scale_embedding=False , use_learned_position_embeddings=True , layernorm_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 586
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class UpperCAmelCase (PretrainedConfig ):
    """simple docstring"""
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }
    def __init__( self , vocab_size=30522 , max_position_embeddings=512 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=12 , dim=768 , hidden_dim=4 * 768 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.02 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs , pad_token_id=pad_token_id )
class UpperCAmelCase (OnnxConfig ):
    """simple docstring"""
    @property
    def inputs ( self ):
if self.task == "multiple-choice":
lowercase__: Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase__: Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
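# What the `inputs` property above produces for the default (non
# multiple-choice) task, sketched standalone; ONNX export uses these axis
# names to mark the batch and sequence dimensions as dynamic:
from collections import OrderedDict

dynamic_axis = {0: "batch", 1: "sequence"}
onnx_inputs = OrderedDict(
    [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)]
)
assert list(onnx_inputs) == ["input_ids", "attention_mask"]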
| 586
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 202
|
__all__ = [
"DownloadConfig",
"DownloadManager",
"DownloadMode",
"StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 202
| 1
|
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester (unittest.TestCase ):
    def __init__( self ,parent ,do_resize : bool = True ,size : Dict[str, int] = None ,size_divisor : int = 32 ,do_rescale : bool = True ,rescale_factor : Union[int, float] = 1 / 255 ,do_normalize : bool = True ,do_center_crop : bool = True ,image_mean : Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073] ,image_std : Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711] ,do_pad : bool = True ,batch_size : int = 7 ,min_resolution : int = 30 ,max_resolution : int = 400 ,num_channels : int = 3 ,):
        '''simple docstring'''
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {'shortest_edge': 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict ( self ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values ( self ,image_inputs ,batched=False ):
        '''simple docstring'''
        if not batched:
            size = self.size['shortest_edge']
            image = image_inputs[0]
            if isinstance(image ,Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w ,h )
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            max_size = int((1333 / 800) * size )
            if max(newh ,neww ) > max_size:
                scale = max_size / max(newh ,neww )
                newh = newh * scale
                neww = neww * scale
            newh, neww = int(newh + 0.5 ), int(neww + 0.5 )
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values ,key=lambda item : item[0] )[0]
            expected_width = max(expected_values ,key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class __lowerCamelCase (ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None
    def setUp ( self ):
        '''simple docstring'''
        self.image_processor_tester = BridgeTowerImageProcessingTester(self )
    @property
    def image_processor_dict ( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties ( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing ,'image_mean' ) )
        self.assertTrue(hasattr(image_processing ,'image_std' ) )
        self.assertTrue(hasattr(image_processing ,'do_normalize' ) )
        self.assertTrue(hasattr(image_processing ,'do_resize' ) )
        self.assertTrue(hasattr(image_processing ,'size' ) )
        self.assertTrue(hasattr(image_processing ,'size_divisor' ) )
    def test_batch_feature ( self ):
        '''simple docstring'''
        pass
    def test_call_pil ( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image ,Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs ,batched=True )
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)
    def test_call_numpy ( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False ,numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image ,np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs ,batched=True )
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ))
| 1
|
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()
        vocab_tokens = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)
            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
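# Hedged sketch of what the assertions above encode: Funnel reserves token type 2
# for the leading <cls> token, then uses 0 for the first segment and 1 for the
# second, so encoding an illustrative pair ("want", "want") with the toy vocab
# above would yield token_type_ids [2, 0, 0, 1, 1] for
# [<cls>, want, <sep>, want, <sep>].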
| 237
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/swin-tiny-patch4-window7-224': (
        'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
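# Usage sketch (not part of the original file): the channel dimension doubles at
# every stage, so for the default swin-tiny layout
#   config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
#   config.hidden_size == 96 * 2 ** 3 == 768
# which is what VisionEncoderDecoderModel reads off this config.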
| 712
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
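# Usage sketch (assumption: this conftest is picked up automatically by pytest):
#   pytest tests/ --make-reports=run_1
# makes pytest_terminal_summary_main write per-test report files for that run id.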
| 107
| 0
|
deps = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
| 147
|
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
"""simple docstring"""
    max_seq_length: Optional[int] = field(
        default=128, metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""})
    pad_to_max_length: bool = field(
        default=True, metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        }, )
    max_predict_samples: Optional[int] = field(
        default=None, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of prediction examples to this """
                """value if set."""
            )
        }, )
@dataclass
class ModelArguments:
"""simple docstring"""
    model_name_or_path: str = field(
        default=None, metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""})
    language: str = field(
        default=None, metadata={"""help""": """Evaluation language. Also train language if `train_language` is set to None."""})
    train_language: Optional[str] = field(
        default=None, metadata={"""help""": """Train language if it is different from the evaluation language."""})
    config_name: Optional[str] = field(
        default=None, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""})
    cache_dir: Optional[str] = field(
        default=None, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""}, )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"""help""": """arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"""}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""}, )
    model_revision: str = field(
        default="""main""", metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""}, )
    use_auth_token: bool = field(
        default=False, metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        }, )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""}, )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_xnli''', model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                '''xnli''', model_args.language, split='''train''', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        else:
            train_dataset = load_dataset(
                '''xnli''', model_args.train_language, split='''train''', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = train_dataset.features['''label'''].names
    if training_args.do_eval:
        eval_dataset = load_dataset(
            '''xnli''', model_args.language, split='''validation''', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = eval_dataset.features['''label'''].names
    if training_args.do_predict:
        predict_dataset = load_dataset(
            '''xnli''', model_args.language, split='''test''', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = predict_dataset.features['''label'''].names
    # Labels
    num_labels = len(label_list )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i ): label for i, label in enumerate(label_list )}, label2id={label: i for i, label in enumerate(label_list )}, finetuning_task='''xnli''', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = '''max_length'''
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    def preprocess_function(examples ):
        # Tokenize the texts
        return tokenizer(
            examples['''premise'''], examples['''hypothesis'''], padding=padding, max_length=data_args.max_seq_length, truncation=True, )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ), data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc='''Running tokenizer on train dataset''', )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset ) ), 3 ):
            logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ), data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc='''Running tokenizer on validation dataset''', )
    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset ), data_args.max_predict_samples )
            predict_dataset = predict_dataset.select(range(max_predict_samples ) )
        with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc='''Running tokenizer on prediction dataset''', )
# Get the metric function
    metric = evaluate.load('''xnli''' )
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction ):
        preds = p.predictions[0] if isinstance(p.predictions, tuple ) else p.predictions
        preds = np.argmax(preds, axis=1 )
        return metric.compute(predictions=preds, references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 )
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['''train_samples'''] = min(max_train_samples, len(train_dataset ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('''train''', metrics )
        trainer.save_metrics('''train''', metrics )
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['''eval_samples'''] = min(max_eval_samples, len(eval_dataset ) )
        trainer.log_metrics('''eval''', metrics )
        trainer.save_metrics('''eval''', metrics )
# Prediction
    if training_args.do_predict:
        logger.info('''*** Predict ***''' )
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix='''predict''' )
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset )
        )
        metrics['''predict_samples'''] = min(max_predict_samples, len(predict_dataset ) )
        trainer.log_metrics('''predict''', metrics )
        trainer.save_metrics('''predict''', metrics )
        predictions = np.argmax(predictions, axis=1 )
        output_predict_file = os.path.join(training_args.output_dir, '''predictions.txt''' )
        if trainer.is_world_process_zero():
            with open(output_predict_file, '''w''' ) as writer:
                writer.write('''index\tprediction\n''' )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(F"""{index}\t{item}\n""" )
if __name__ == "__main__":
main()
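# Usage sketch (mirrors the upstream text-classification example docs):
#   python run_xnli.py --model_name_or_path bert-base-multilingual-cased \
#     --language de --train_language en --do_train --do_eval \
#     --per_device_train_batch_size 32 --output_dir /tmp/debug_xnli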
| 177
| 0
|
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint['encoder.conv_in.weight'] = vae_state_dict['encoder.conv_in.weight']
    new_checkpoint['encoder.conv_in.bias'] = vae_state_dict['encoder.conv_in.bias']
    new_checkpoint['encoder.conv_out.weight'] = vae_state_dict['encoder.conv_out.weight']
    new_checkpoint['encoder.conv_out.bias'] = vae_state_dict['encoder.conv_out.bias']
    new_checkpoint['encoder.conv_norm_out.weight'] = vae_state_dict['encoder.norm_out.weight']
    new_checkpoint['encoder.conv_norm_out.bias'] = vae_state_dict['encoder.norm_out.bias']
    new_checkpoint['decoder.conv_in.weight'] = vae_state_dict['decoder.conv_in.weight']
    new_checkpoint['decoder.conv_in.bias'] = vae_state_dict['decoder.conv_in.bias']
    new_checkpoint['decoder.conv_out.weight'] = vae_state_dict['decoder.conv_out.weight']
    new_checkpoint['decoder.conv_out.bias'] = vae_state_dict['decoder.conv_out.bias']
    new_checkpoint['decoder.conv_norm_out.weight'] = vae_state_dict['decoder.norm_out.weight']
    new_checkpoint['decoder.conv_norm_out.bias'] = vae_state_dict['decoder.norm_out.bias']
    new_checkpoint['quant_conv.weight'] = vae_state_dict['quant_conv.weight']
    new_checkpoint['quant_conv.bias'] = vae_state_dict['quant_conv.bias']
    new_checkpoint['post_quant_conv.weight'] = vae_state_dict['post_quant_conv.weight']
    new_checkpoint['post_quant_conv.bias'] = vae_state_dict['post_quant_conv.bias']
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} )
    down_blocks = {
        layer_id: [key for key in vae_state_dict if F'down.{layer_id}' in key] for layer_id in range(num_down_blocks )
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} )
    up_blocks = {
        layer_id: [key for key in vae_state_dict if F'up.{layer_id}' in key] for layer_id in range(num_up_blocks )
    }
    for i in range(num_down_blocks ):
        resnets = [key for key in down_blocks[i] if F'down.{i}' in key and F'down.{i}.downsample' not in key]
        if F'encoder.down.{i}.downsample.conv.weight' in vae_state_dict:
            new_checkpoint[F'encoder.down_blocks.{i}.downsamplers.0.conv.weight'] = vae_state_dict.pop(
                F'encoder.down.{i}.downsample.conv.weight' )
            new_checkpoint[F'encoder.down_blocks.{i}.downsamplers.0.conv.bias'] = vae_state_dict.pop(
                F'encoder.down.{i}.downsample.conv.bias' )
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {'old': F'down.{i}.block', 'new': F'down_blocks.{i}.resnets'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config )
    mid_resnets = [key for key in vae_state_dict if 'encoder.mid.block' in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if F'encoder.mid.block_{i}' in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {'old': F'mid.block_{i}', 'new': F'mid_block.resnets.{i - 1}'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config )
    mid_attentions = [key for key in vae_state_dict if 'encoder.mid.attn' in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config )
    conv_attn_to_linear(new_checkpoint )
    for i in range(num_up_blocks ):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if F'up.{block_id}' in key and F'up.{block_id}.upsample' not in key
        ]
        if F'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict:
            new_checkpoint[F'decoder.up_blocks.{i}.upsamplers.0.conv.weight'] = vae_state_dict[
                F'decoder.up.{block_id}.upsample.conv.weight'
            ]
            new_checkpoint[F'decoder.up_blocks.{i}.upsamplers.0.conv.bias'] = vae_state_dict[
                F'decoder.up.{block_id}.upsample.conv.bias'
            ]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {'old': F'up.{block_id}.block', 'new': F'up_blocks.{i}.resnets'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config )
    mid_resnets = [key for key in vae_state_dict if 'decoder.mid.block' in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if F'decoder.mid.block_{i}' in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {'old': F'mid.block_{i}', 'new': F'mid_block.resnets.{i - 1}'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config )
    mid_attentions = [key for key in vae_state_dict if 'decoder.mid.attn' in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config )
    conv_attn_to_linear(new_checkpoint )
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str ):
    # Only support V1
    r = requests.get(
        'https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' )
    io_obj = io.BytesIO(r.content )
    original_config = OmegaConf.load(io_obj )
    image_size = 512
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if checkpoint_path.endswith('safetensors' ):
        from safetensors import safe_open
        checkpoint = {}
        with safe_open(checkpoint_path, framework='pt', device='cpu' ) as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key )
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device )['state_dict']
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size )
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config )
    vae = AutoencoderKL(**vae_config )
    vae.load_state_dict(converted_vae_checkpoint )
    vae.save_pretrained(output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
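# Usage sketch (file names are placeholders; assumes this script is saved as
# convert_vae_pt_to_diffusers.py):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers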
| 720
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                text = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = 'xa\u0303y' + ' ' + 'x\xe3y'
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    '\u0009', # (horizontal tab, '\t')
                    '\u000B', # (vertical tab)
                    '\u000C', # (form feed)
                    '\u0020', # (space, ' ')
                    '\u200E', # (left-to-right mark)
                    '\u200F', # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    '\u000A', # (line feed, '\n')
                    '\r\n', # (carriage return and line feed, '\r\n')
                    '\u000D', # (carriage return, '\r')
                    '\r', # (carriage return, '\r')
                    '\u000D', # (carriage return, '\r')
                    '\u2028', # (line separator)
                    '\u2029', # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'{text_of_1_token} {text_of_1_token}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )
                text = F' {text}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer')
        self.assertTrue(
            context.exception.args[0].startswith(
                'The `backend_tokenizer` provided does not match the expected format.'))
@require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()
    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
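# BPE walk-through for the toy vocab above (illustrative): "lower" is first split
# into the characters "l o w e r</w>", where "</w>" marks the end of a word;
# applying the learned merges "l o" -> "lo" and "e r</w>" -> "er</w>" leaves
# ["lo", "w", "er</w>"], matching the expected tokens in test_full_tokenizer.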
| 685
| 0
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 39
|
def bead_sort(sequence: list) -> list:
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError('''Sequence must be list of non-negative integers''')
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                # "beads" fall from the heavier upper rod onto the lighter lower one
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
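    # Worked example: one inner sweep over [3, 1] sees rod_upper=3 > rod_lower=1,
    # so 3 - 1 = 2 "beads" fall and the pair becomes [1, 3]. This list version
    # costs O(n^2) comparisons, unlike the O(sqrt(n)) gravity steps of the
    # idealized hardware bead sort.
    assert bead_sort([5, 0, 4, 3]) == [0, 3, 4, 5]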
| 39
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
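# With _LazyModule, `from transformers.models.git import GitModel` defers the
# import of modeling_git (and its torch dependency) until the attribute is first
# accessed, which keeps a bare `import transformers` cheap.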
| 719
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/trocr-base-handwritten''': (
        '''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig( PretrainedConfig ):
    model_type = """trocr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }
    def __init__(self, vocab_size=50_265, d_model=1_024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4_096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
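# Usage sketch: the attribute_map above lets generic code read decoder-style
# attributes through the common names, e.g.
#   config = TrOCRConfig()
#   config.hidden_size == config.d_model == 1024
#   config.num_hidden_layers == config.decoder_layers == 12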
| 631
| 0
|
'''simple docstring'''
import os
def solution(filename: str = "input.txt" ) -> int:
    with open(os.path.join(os.path.dirname(__file__ ), filename ) ) as input_file:
        matrix = [
            [int(element ) for element in line.split(',' )]
            for line in input_file.readlines()
        ]
    rows = len(matrix )
    cols = len(matrix[0] )
    minimal_path_sums = [[-1 for _ in range(cols )] for _ in range(rows )]
    for i in range(rows ):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols ):
        for i in range(rows ):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j] )
        for i in range(rows - 2, -1, -1 ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j] )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f"""{solution() = }""")
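# Worked example on the 2 x 2 matrix [[1, 9], [5, 1]] (moves: right, up, down):
# column 0 seeds the sums to [1, 5]; for column 1 the rightward pass gives
# [1+9, 5+1] = [10, 6], and neither the downward pass (min(6, 10+1)) nor the
# upward pass (min(10, 6+9)) improves anything, so the answer is min(10, 6) = 6.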
| 366
|
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path ):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file )
    model = OpenAIGPTModel(config )
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
    torch.save(model.state_dict(), pytorch_weights_dump_path )
    print(F'Save configuration file to {pytorch_config_dump_path}' )
    with open(pytorch_config_dump_path, 'w', encoding='utf-8' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
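# Usage sketch (paths are placeholders; the script file name is an assumption):
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#     --openai_checkpoint_folder_path ./openai-gpt-ckpt \
#     --pytorch_dump_folder_path ./openai-gpt-pt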
| 366
| 1
|
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None, ):
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        msg = (
            'Expected the same number of rows for A and B. '
            f"""Instead found A of size {shape_a} and B of size {shape_b}"""
        )
        raise ValueError(msg )
    if shape_b[1] != shape_c[1]:
        msg = (
            'Expected the same number of columns for B and C. '
            f"""Instead found B of size {shape_b} and C of size {shape_c}"""
        )
        raise ValueError(msg )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                'Input matrix A is not invertible. Cannot compute Schur complement.' )
    return mat_c - mat_b.T @ a_inv @ mat_b
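# The tests below lean on the block-determinant identity: for
#   X = [[A, B], [B^T, C]],  det(X) = det(A) * det(C - B^T A^{-1} B),
# i.e. det(X) = det(A) * det(schur_complement(A, B, C)).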
class TestSchurComplement(unittest.TestCase ):
    def test_schur_complement(self ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a, b, c )
        input_matrix = np.block([[a, b], [b.T, c]] )
        det_x = np.linalg.det(input_matrix )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )
        self.assertAlmostEqual(det_x, det_a * det_s )
    def test_improper_a_b_dimensions(self ) -> None:
        # A has 2 rows while B has 3, which must raise.
        a = np.array([[1, 2, 1], [2, 1, 2]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(ValueError ):
            schur_complement(a, b, c )
    def test_improper_b_c_dimensions(self ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(ValueError ):
            schur_complement(a, b, c )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 534
|
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height ):
return [
int(10_00 * (box[0] / width) ),
int(10_00 * (box[1] / height) ),
int(10_00 * (box[2] / width) ),
int(10_00 * (box[3] / height) ),
]
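# Example: the box [10, 20, 110, 220] in a 1000 x 2000 (width x height) image
# normalizes to [10, 10, 110, 110] on the fixed 0-1000 grid that LayoutLM-style
# models expect for bounding boxes.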
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None ):
    tesseract_config = tesseract_config if tesseract_config is not None else ''
    # apply OCR
    pil_image = to_pil_image(image )
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='dict', config=tesseract_config )
    words, left, top, width, height = data['text'], data['left'], data['top'], data['width'], data['height']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class LayoutLMv2ImageProcessor( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, apply_ocr: bool = True, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = "", **kwargs, ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'height': 224, 'width': 224}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        output_size = (size['height'], size['width'])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs )
    def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, apply_ocr: bool = None, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if apply_ocr:
            requires_backends(self, 'pytesseract' )
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config )
                words_batch.append(words )
                boxes_batch.append(boxes )
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample ) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image ) for image in images]
        images = [to_channel_dimension_format(image, data_format ) for image in images]
        data = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors )
        if apply_ocr:
            data['words'] = words_batch
            data['boxes'] = boxes_batch
        return data
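# Minimal usage sketch (assumes Tesseract is installed when apply_ocr=True):
#   processor = LayoutLMv2ImageProcessor()
#   encoding = processor(images, return_tensors="pt")
# encoding.pixel_values is (batch, 3, 224, 224) in BGR channel order, and with
# OCR on, encoding.words / encoding.boxes hold the words and normalized boxes.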
| 534
| 1
|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest( TokenizerTesterMixin, unittest.TestCase ):
    slow_tokenizer_class = None
    tokenizer_class = BloomTokenizerFast
    rust_tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = '''tokenizer_file'''
    special_tokens_map = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained('bigscience/tokenizer')
        tokenizer.save_pretrained(self.tmpdirname)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()
        input_sentences = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
        target_tokens = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(input_sentences)['input_ids']
        self.assertListEqual(target_tokens, computed_tokens)
        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, input_sentences)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)
                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail('Bloom Tokenizer should be able to deal with padding')
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='max_length', )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='max_length', )
def snake_case ( self: Dict ):
__UpperCAmelCase = self.get_rust_tokenizer()
__UpperCAmelCase = load_dataset('xnli' ,'all_languages' ,split='test' ,streaming=UpperCamelCase_ )
__UpperCAmelCase = next(iter(UpperCamelCase_ ) )['premise'] # pick up one data
__UpperCAmelCase = list(sample_data.values() )
__UpperCAmelCase = list(map(tokenizer.encode ,UpperCamelCase_ ) )
__UpperCAmelCase = [tokenizer.decode(UpperCamelCase_ ,clean_up_tokenization_spaces=UpperCamelCase_ ) for x in output_tokens]
self.assertListEqual(UpperCamelCase_ ,UpperCamelCase_ )
def snake_case ( self: Tuple ):
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) ,1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) ,1 )
| 396
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# NOTE: the original dummy class names were lost in extraction; the placeholder names
# below stand in for four dummy Flax model classes that all share the same template.
class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
| 545
| 0
|
import math
import qiskit
def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)
    return job.result().get_counts(quantum_circuit)
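# A quick sanity check, assuming qiskit with the Aer simulator is installed:
# quantum_full_adder(1, 0, 1) measures carry-out and sum as "10" on every shot,
# i.e. 1 + 0 + 1 = 0b10. An input of 2 puts that qubit in superposition via a
# Hadamard gate, so the counts then split across several outcomes.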
if __name__ == "__main__":
print(F"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
| 717
|
import math
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
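# Examples: is_prime(29) -> True, is_prime(15) -> False.
# next_prime(14) climbs upward to 17; next_prime(14, desc=True) walks down to 13.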
| 212
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 267
|
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048


class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1
        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs


def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)
    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] ,std=[0.12_221_994, 0.12_145_835, 0.14_380_469] ,),
] )
| 267
| 1
|
import math
def sieve(n):
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
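# Quick check: sieve(50) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47].
# The segmented approach keeps memory at O(sqrt(n)) per window instead of O(n).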
print(sieve(10**6))
| 712
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A = logging.get_logger(__name__)
A = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 46
| 0
|
from __future__ import annotations
def all_unique(collection) -> bool:
    # True when every element of the collection occurs exactly once
    return len(set(collection)) == len(collection)
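# all_unique([1, 2, 3]) -> True; all_unique(["a", "b", "a"]) -> False.
# Note this check relies on the elements being hashable.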
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowercase_ = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 74
| 1
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 702
|
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
A_ = ["small", "medium", "large"]
A_ = "lm_head.decoder.weight"
A_ = "lm_head.weight"
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str:
lowerCamelCase_ = torch.load(__UpperCamelCase )
lowerCamelCase_ = d.pop(__UpperCamelCase )
os.makedirs(__UpperCamelCase ,exist_ok=__UpperCamelCase )
torch.save(__UpperCamelCase ,os.path.join(__UpperCamelCase ,__UpperCamelCase ) )
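# The conversion is a single key rename: the original DialoGPT checkpoints store the
# LM head under "lm_head.decoder.weight", while transformers expects "lm_head.weight".
# The renamed state dict is saved under WEIGHTS_NAME ("pytorch_model.bin").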
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
| 384
| 0
|
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 577
|
def combination_util(arr, n, r, index, data, i):
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
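# print_combination([10, 20, 30, 40, 50], 5, 3) prints all C(5, 3) = 10
# three-element combinations, one per line, in lexicographic order of indices.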
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 205
| 0
|
'''simple docstring'''
import copy
import tempfile
import unittest
from transformers import M2M100Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import M2M100ForConditionalGeneration, M2M100Model, M2M100Tokenizer
    from transformers.models.m2m_100.modeling_m2m_100 import M2M100Decoder, M2M100Encoder
def prepare_m2m_100_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class M2M100ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        inputs_dict = prepare_m2m_100_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return M2M100Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = M2M100Model(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = M2M100Model(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = M2M100Encoder.from_pretrained(tmpdirname).to(torch_device)
        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = M2M100Decoder.from_pretrained(tmpdirname).to(torch_device)
        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class M2M100ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            M2M100Model,
            M2M100ForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (M2M100ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": M2M100ForConditionalGeneration,
            "feature-extraction": M2M100Model,
            "summarization": M2M100ForConditionalGeneration,
            "text2text-generation": M2M100ForConditionalGeneration,
            "translation": M2M100ForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True
        return False

    def setUp(self):
        self.model_tester = M2M100ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=M2M100Config)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (M2M100Model, M2M100ForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)
            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
            with torch.no_grad():
                model(**inputs)[0]

    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = M2M100ForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class M2M100ModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = M2M100Model.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")
        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )
        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]
        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
| 511
|
'''simple docstring'''
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
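# Example: harmonic_series(5) -> ['1', '1/2', '1/3', '1/4', '1/5']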
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
| 511
| 1
|
import math
def sieve(n):
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
| 43
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 478
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 556
|
"""simple docstring"""
UNIT_SYMBOL = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
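# Examples: length_conversion(1, "meter", "kilometer") == 0.001 and
# length_conversion(1, "km", "m") == 1000.0, since the exponents differ by 3.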
if __name__ == "__main__":
from doctest import testmod
testmod()
| 556
| 1
|
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        # rightmost position of char in the pattern, or -1 if absent
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        # text position of the rightmost mismatch for this alignment, or -1 on a full match
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
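# With the inputs below, BoyerMooreSearch("ABAABA", "AB").bad_character_heuristic()
# returns [0, 3]. The bad-character heuristic alone is O(textLen * patLen) in the
# worst case; combining it with the good-suffix rule gives the full Boyer-Moore bound.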
__lowerCAmelCase = """ABAABA"""
__lowerCAmelCase = """AB"""
__lowerCAmelCase = BoyerMooreSearch(text, pattern)
__lowerCAmelCase = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
| 147
|
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
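# Usage sketch (assumes a diffusers-style nn.Module with an accelerate hook attached):
#
#     class MyModel(nn.Module):
#         @apply_forward_hook
#         def encode(self, x):
#             ...
#
# The wrapper fires the hook's pre_forward (e.g. moving offloaded weights onto the
# device) before the decorated method runs; on old accelerate versions it is a no-op.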
| 147
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 720
|
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
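# mobius(7) == -1 (one prime factor), mobius(15) == 1 (two prime factors),
# mobius(24) == 0 (24 is divisible by the square 4, so it is not square-free).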
if __name__ == "__main__":
import doctest
doctest.testmod()
| 112
| 0
|
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase ( __a):
def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=99 , _A=32 , _A=5 , _A=4 , _A=37 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_12 , _A=16 , _A=2 , _A=0.02 , _A=False , _A=True , _A="None" , _A=3 , _A=4 , _A=None , ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = parent
_UpperCAmelCase : Dict = batch_size
_UpperCAmelCase : List[Any] = seq_length
_UpperCAmelCase : str = is_training
_UpperCAmelCase : Optional[int] = use_input_mask
_UpperCAmelCase : Optional[Any] = use_token_type_ids
_UpperCAmelCase : Union[str, Any] = use_labels
_UpperCAmelCase : str = vocab_size
_UpperCAmelCase : Dict = hidden_size
_UpperCAmelCase : Optional[Any] = num_hidden_layers
_UpperCAmelCase : Optional[Any] = num_attention_heads
_UpperCAmelCase : List[str] = intermediate_size
_UpperCAmelCase : str = hidden_act
_UpperCAmelCase : Tuple = hidden_dropout_prob
_UpperCAmelCase : Any = attention_probs_dropout_prob
_UpperCAmelCase : str = max_position_embeddings
_UpperCAmelCase : Optional[int] = type_vocab_size
_UpperCAmelCase : Dict = type_sequence_label_size
_UpperCAmelCase : Optional[int] = initializer_range
_UpperCAmelCase : Dict = num_labels
_UpperCAmelCase : List[str] = num_choices
_UpperCAmelCase : str = relative_attention
_UpperCAmelCase : int = position_biased_input
_UpperCAmelCase : List[str] = pos_att_type
_UpperCAmelCase : List[str] = scope
def __snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : List[str] = None
if self.use_input_mask:
_UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_UpperCAmelCase : Any = None
if self.use_token_type_ids:
_UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __snake_case ( self ) -> Optional[int]:
'''simple docstring'''
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config(self):
        # Pipeline tests use an enlarged vocabulary so tokenizer ids stay in range
        # (attribute restored by assumption; the obfuscated dump dropped the name).
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        # `torch_device` is expected to be imported from transformers.testing_utils earlier in this file.
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
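
# Illustrative sketch (added; not part of the upstream test file): the tester drives
# each `create_and_check_*` helper with one freshly sampled config/input tuple,
# e.g. from inside any unittest.TestCase method:
#
#   tester = DebertaModelTester(self)
#   tester.create_and_check_deberta_model(*tester.prepare_config_and_inputs())
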
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 238
|
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""[UNK]""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
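    # Note (added): the two files written above form a minimal byte-level BPE
    # tokenizer: `vocab_file` maps tokens to ids and `merges_file` lists merge
    # rules ("\u0120" encodes a leading space). That is all
    # DebertaTokenizer.from_pretrained(self.tmpdirname) needs to load.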
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        # Boolean flags below restored by assumption: encode the raw ids without
        # special tokens, then compare against encoding with special tokens added.
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
"""input_ids""": [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
| 238
| 1
|
'''simple docstring'''
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Update the element at index i to a new value, in O(log n)."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Combine the values on the inclusive range [i, j] with self.fn, in O(log n)."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)
    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)
    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)
    def traverse(self):
        """Yield the nodes of the tree in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
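    # Note (added): building the tree is O(n); `update` and `query_range` are
    # O(log n). For fn = operator.add and the updated array [2, 5, 5, 3, 4],
    # query_range(1, 3) combines 5 + 5 + 3 = 13, matching the comment above.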
| 720
|
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch
        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size,
            predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True,
            warmup_steps=0, eval_steps=2, logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics,
            train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer,
        )

        # start training
        trainer.train()
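# Note (added): with 32 training examples, per-device batch size 4 and the
# Trainer's default 3 epochs, this run performs 24 optimizer steps on a single
# device and evaluates every 2 steps, so it stays cheap enough for a smoke test.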
| 156
| 0
|
'''simple docstring'''
def topological_sort(graph: dict) -> None:
    """Kahn's algorithm: repeatedly emit vertices whose indegree has dropped to 0."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
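# Note (added): for the graph above this prints [0, 1, 2, 3, 4, 5] — one valid
# topological order; vertices reaching indegree 0 earliest are emitted first.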
| 18
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
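# Illustrative usage sketch (added; not part of the original file): building a
# deliberately tiny Falcon config. Keyword names mirror __init__ above.
#
#   config = FalconConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2,
#                         num_attention_heads=4, alibi=False)
#   assert config.head_dim == 16 and config.rotary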
| 18
| 1
|
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric("mean_iou")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results)  # doctest: +NORMALIZE_WHITESPACE\n    {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), \'per_category_accuracy\': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}\n'
_CITATION = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate intersection and union between one prediction and its ground truth."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Accumulate intersections and unions over a batch of (prediction, ground truth) pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate mean Intersection-over-Union (mIoU) plus per-category IoU and accuracy."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self, predictions, references, num_labels: int, ignore_index: bool,
        nan_to_num: Optional[int] = None, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index,
            nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels,
        )
        return iou_result
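# Note (added): per-class IoU is |pred==c AND gt==c| / |pred==c OR gt==c|. The
# np.nanmean calls above skip classes that appear in neither prediction nor
# ground truth: their 0/0 union yields NaN instead of dragging the mean down.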
| 712
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 215
| 0
|
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F"""{solution() = }""")
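# Note (added): quick sanity checks — solution(6) == 13 (primes: 2, 3, 5, 7, 11, 13),
# and the default solution() returns the 10001st prime, 104743 (Project Euler 7).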
| 53
|
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Solve for the missing quantity in |Z|^2 = R^2 + X^2, given exactly one zero argument."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
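# Note (added): quick example — electrical_impedance(3, 4, 0) returns
# {"impedance": 5.0}, since |Z| = sqrt(3**2 + 4**2) for a series R-X circuit.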
| 550
| 0
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=kernel_size, strides=stride, padding="VALID", groups=groups,
            use_bias=False, name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r'''
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
REGNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states,
            return_dict=return_dict, training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: Optional[tf.Tensor] = None,
        labels: Optional[tf.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
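# Illustrative usage sketch (added; not part of the original file). Assumes the
# TF weights for "facebook/regnet-y-040" and AutoImageProcessor are available:
#
#   from transformers import AutoImageProcessor, TFRegNetForImageClassification
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="tf")
#   logits = model(**inputs).logits  # shape: (1, num_labels)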
| 721
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """Output of `PriorTransformer`: the predicted CLIP image embedding."""

    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout,
                    activation_fn="gelu", attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""Returns a dict of all attention processors used in the model, indexed by weight name."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""Sets the attention processor to use, either one processor for all layers or a dict keyed by layer name."""
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        """Disables custom attention processors and restores the default attention implementation."""
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
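# --- Added usage sketch (not part of the original file). This module uses
# relative imports, so the sketch assumes an installed diffusers package:
#
#     from diffusers import PriorTransformer
#     import torch
#
#     prior = PriorTransformer(num_layers=2)  # tiny config for a smoke test
#     out = prior(
#         hidden_states=torch.randn(1, 768),        # noisy CLIP image embedding
#         timestep=10,
#         proj_embedding=torch.randn(1, 768),       # pooled CLIP text embedding
#         encoder_hidden_states=torch.randn(1, 77, 768),
#     )
#     print(out.predicted_image_embedding.shape)    # torch.Size([1, 768])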
"""
Project Euler Problem 31: https://projecteuler.net/problem=31

Count the number of ways a given amount (in pence) can be made from the
standard UK coins: 1p, 2p, 5p, 10p, 20p, 50p, one pound and two pounds.
"""


def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(n: int = 200) -> int:
    """Return the number of ways `n` pence can be made from UK coins."""
    return two_pound(n)
if __name__ == "__main__":
print(solution(int(input().strip())))
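# Added note (not in the original file): each helper only "spends" coins of
# its own denomination before delegating to the next-smaller one, so the
# recursion counts combinations rather than permutations; solution(200)
# returns 73682, the published Project Euler 31 answer. The plain recursion
# recomputes heavily; a hedged sketch of the same count via the classic
# dynamic-programming table, useful as a cross-check:
#
#     def count_ways(amount: int = 200) -> int:
#         coins = (1, 2, 5, 10, 20, 50, 100, 200)
#         ways = [1] + [0] * amount
#         for coin in coins:
#             for value in range(coin, amount + 1):
#                 ways[value] += ways[value - coin]
#         return ways[amount]  # count_ways(200) == 73682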
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
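# Added note (not part of the original file): once the lazy module is
# installed into sys.modules, a symbol's submodule is only imported on first
# attribute access, e.g.:
#
#     from transformers.models.efficientnet import EfficientNetConfig
#     config = EfficientNetConfig()  # the heavy modeling/vision code stays unimported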
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")
def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
import warnings

from .generation import TFGenerationMixin


class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
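# Added usage sketch (not part of the original file): `hidden_size` and
# `num_attention_heads` resolve through `attribute_map` above, so:
#
#     config = Speech2Text2Config(decoder_layers=4)
#     assert config.hidden_size == config.d_model == 256
#     assert config.num_attention_heads == config.decoder_attention_heads == 4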
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Resize an image so that its shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Center-crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize an image with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
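# Added usage sketch (not part of the original file): the processor is
# callable because BaseImageProcessor.__call__ forwards to preprocess() above.
#
#     from PIL import Image
#     processor = CLIPImageProcessor()
#     image = Image.new("RGB", (640, 480), color="gray")
#     batch = processor(images=image, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 224, 224)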
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
"""simple docstring"""
import baseaa
def a__ ( lowerCAmelCase__ ):
return baseaa.baaencode(string.encode("utf-8" ) )
def a__ ( lowerCAmelCase__ ):
return baseaa.baadecode(lowerCAmelCase__ ).decode("utf-8" )
if __name__ == "__main__":
lowerCamelCase = """Hello World!"""
lowerCamelCase = baseaa_encode(test)
print(encoded)
lowerCamelCase = baseaa_decode(encoded)
print(decoded)
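# Added note (not in the original file): b85encode maps each 4-byte group of
# the UTF-8 input to 5 printable characters, so encode/decode always
# round-trips:
#
#     assert base85_decode(base85_encode("some text")) == "some text"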
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="max_length", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=16, truncation=True, return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
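# Added note (not in the original file): this package re-exports the masked
# ("emmental") BERT variants from the movement-pruning research project;
# `from .modules import *` additionally pulls in the masked linear layers
# that back the `Masked*` model classes above.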
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)
    def test_mecab_tokenizer(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"],
        )
    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )
    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )
def UpperCamelCase_ ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
_snake_case : Union[str, Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
_snake_case : str = {}
for i, token in enumerate(lowerCAmelCase):
_snake_case : List[Any] = i
_snake_case : List[Any] = WordpieceTokenizer(vocab=lowerCAmelCase , unk_token="""[UNK]""")
self.assertListEqual(tokenizer.tokenize("""""") , [])
self.assertListEqual(tokenizer.tokenize("""こんにちは""") , ["""こんにちは"""])
self.assertListEqual(tokenizer.tokenize("""こんばんは""") , ["""こん""", """##ばんは"""])
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""") , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""])
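# A minimal sketch (ours, not the library implementation) of the greedy
# longest-match-first rule the WordpieceTokenizer above applies per word:
# repeatedly take the longest vocab entry matching the current position,
# prefixing continuations with "##"; if any position has no match, the
# whole word becomes the unknown token.
def _demo_wordpiece(word, vocab, unk="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        piece = None
        while start < end:
            candidate = ("##" if start > 0 else "") + word[start:end]
            if candidate in vocab:
                piece = candidate
                break
            end -= 1
        if piece is None:
            return [unk]  # no match anywhere in the word -> [UNK] for the whole word
        tokens.append(piece)
        start = end
    return tokens
# e.g. _demo_wordpiece("こんばんは", {"こんにちは", "こん", "##ばんは"}) == ["こん", "##ばんは"]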
def UpperCamelCase_ ( self : Optional[Any]) -> str:
"""simple docstring"""
_snake_case : Optional[int] = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""")
_snake_case : Tuple = tokenizer.subword_tokenizer
_snake_case : Tuple = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""")
self.assertListEqual(lowerCAmelCase , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""])
_snake_case : Union[str, Any] = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""")
self.assertListEqual(lowerCAmelCase , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""])
def UpperCamelCase_ ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
_snake_case : int = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""")
_snake_case : str = tokenizer.encode("""ありがとう。""" , add_special_tokens=lowerCAmelCase)
_snake_case : Optional[Any] = tokenizer.encode("""どういたしまして。""" , add_special_tokens=lowerCAmelCase)
_snake_case : str = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase)
_snake_case : Dict = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class snake_case ( SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
'''simple docstring'''
snake_case_ : Tuple = BertJapaneseTokenizer
snake_case_ : Dict = False
def UpperCamelCase_ ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
super().setUp()
_snake_case : Optional[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
_snake_case : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
def UpperCamelCase_ ( self : str , **lowerCAmelCase : Union[str, Any]) -> Any:
"""simple docstring"""
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **lowerCAmelCase)
def UpperCamelCase_ ( self : str , lowerCAmelCase : Tuple) -> Optional[int]:
"""simple docstring"""
_snake_case : Any = """こんにちは、世界。 \nこんばんは、世界。"""
_snake_case : List[Any] = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
return input_text, output_text
def UpperCamelCase_ ( self : str) -> int:
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase_ ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase_ ( self : str) -> Any:
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase_ ( self : Optional[int]) -> int:
"""simple docstring"""
_snake_case : Union[str, Any] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""")
_snake_case : Dict = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""")
self.assertListEqual(
lowerCAmelCase , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12])
def UpperCamelCase_ ( self : List[str]) -> int:
"""simple docstring"""
_snake_case : str = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
_snake_case : int = {}
for i, token in enumerate(lowerCAmelCase):
_snake_case : int = i
_snake_case : Optional[int] = CharacterTokenizer(vocab=lowerCAmelCase , unk_token="""[UNK]""")
self.assertListEqual(tokenizer.tokenize("""""") , [])
self.assertListEqual(tokenizer.tokenize("""こんにちは""") , ["""こ""", """ん""", """に""", """ち""", """は"""])
self.assertListEqual(tokenizer.tokenize("""こんにちほ""") , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""])
def UpperCamelCase_ ( self : int) -> List[str]:
"""simple docstring"""
_snake_case : Optional[int] = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""")
_snake_case : List[str] = tokenizer.encode("""ありがとう。""" , add_special_tokens=lowerCAmelCase)
_snake_case : Optional[int] = tokenizer.encode("""どういたしまして。""" , add_special_tokens=lowerCAmelCase)
_snake_case : str = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase)
_snake_case : Any = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : int) -> List[Any]:
"""simple docstring"""
_snake_case : List[str] = """cl-tohoku/bert-base-japanese"""
_snake_case : int = AutoTokenizer.from_pretrained(lowerCAmelCase)
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase)
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : str) -> Any:
"""simple docstring"""
_snake_case : str = """cl-tohoku/bert-base-japanese"""
with self.assertLogs("""transformers""" , level="""WARNING""") as cm:
BertTokenizer.from_pretrained(lowerCAmelCase)
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from."""))
_snake_case : Any = """bert-base-cased"""
with self.assertLogs("""transformers""" , level="""WARNING""") as cm:
BertJapaneseTokenizer.from_pretrained(lowerCAmelCase)
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from."""))
| 198
| 0
|
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = tf.convert_to_tensor(
[
[
8.222_0991, # 3rd highest value; idx. 0
-0.562_0044,
5.2322_9752,
4.038_6393,
-6.879_8378,
-0.5478_5802,
-3.201_2153,
2.9277_7176,
1.8817_1953,
7.3534_1276, # 5th highest value; idx. 9
8.4320_7833, # 2nd highest value; idx. 10
-9.8571_1836,
-5.9620_9236,
-1.1303_9161,
-7.111_5294,
-0.836_9633,
-5.318_6408,
7.0642_7407,
0.8136_9344,
-0.8202_3817,
-5.917_9796,
0.5881_3443,
-6.9977_8438,
4.7155_1189,
-0.1877_1637,
7.4402_0759, # 4th highest value; idx. 25
9.3845_0987, # 1st highest value; idx. 26
2.1266_2941,
-9.3256_2038,
2.3565_2522,
], # cumulative prob of 5 highest values <= 0.6
[
0.5842_5518,
4.5313_9238,
-5.5751_0464,
-6.2803_0699,
-7.1952_9503,
-4.0212_2551,
1.3933_7037,
-6.0670_7057,
1.5948_0517,
-9.64_3119,
0.0390_7799,
0.6723_1762,
-8.8820_6726,
6.2711_5922, # 4th highest value; idx. 13
2.2852_0723,
4.8276_7506,
4.3042_1368,
8.827_5313, # 2nd highest value; idx. 17
5.4402_9958, # 5th highest value; idx. 18
-4.473_5794,
7.3857_9536, # 3rd highest value; idx. 20
-2.9105_1663,
2.6194_6077,
-2.567_4762,
-9.4895_9302,
-4.0292_2645,
-1.3541_6918,
9.6770_2323, # 1st highest value; idx. 27
-5.8947_8553,
1.8537_0467,
], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
SCREAMING_SNAKE_CASE : Optional[int] = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
SCREAMING_SNAKE_CASE : List[Any] = tf.convert_to_tensor(
[8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.floataa , ) # expected non filtered values as noted above
SCREAMING_SNAKE_CASE : Union[str, Any] = tf_top_k_top_p_filtering(a , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
SCREAMING_SNAKE_CASE : List[Any] = output[output != -float("inf" )]
SCREAMING_SNAKE_CASE : Tuple = tf.cast(
tf.where(tf.not_equal(a , tf.constant(-float("inf" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(a , a , rtol=1e-12 )
tf.debugging.assert_equal(a , a )
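# A simplified NumPy sketch (ours, ignoring min_tokens_to_keep) of the
# filtering the test above exercises: keep the top_k largest logits, then keep
# the smallest descending-probability prefix whose cumulative softmax mass
# reaches top_p, masking everything else to -inf.
def _demo_top_k_top_p(logits, top_k, top_p):
    import numpy as np
    logits = np.asarray(logits, dtype=np.float64).copy()
    logits[logits < np.sort(logits)[-top_k]] = -np.inf  # top-k cutoff
    order = np.argsort(-logits)  # indices sorted by descending logit
    probs = np.exp(logits[order] - logits[order][0])
    probs = probs / probs.sum()
    keep = np.searchsorted(np.cumsum(probs), top_p) + 1  # smallest prefix reaching top_p
    logits[order[keep:]] = -np.inf
    return logits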
@require_tf
class _UpperCamelCase ( unittest.TestCase , __A ):
'''simple docstring'''
if is_tf_available():
lowerCamelCase__ ={
'AutoModelForCausalLM': TFAutoModelForCausalLM,
'AutoModelForSpeechSeq2Seq': TFAutoModelForSpeechSeqaSeq,
'AutoModelForSeq2SeqLM': TFAutoModelForSeqaSeqLM,
'AutoModelForVision2Seq': TFAutoModelForVisionaSeq,
'LogitsProcessorList': TFLogitsProcessorList,
'MinLengthLogitsProcessor': TFMinLengthLogitsProcessor,
'create_tensor_fn': tf.convert_to_tensor,
'floats_tensor': floats_tensor,
'return_tensors': 'tf',
}
@slow
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE : str = 2
SCREAMING_SNAKE_CASE : List[str] = 2
class _UpperCamelCase ( tf.Module ):
'''simple docstring'''
def __init__( self : List[str] , a : Optional[int] ) -> str:
"""simple docstring"""
super(a , self ).__init__()
SCREAMING_SNAKE_CASE : Dict = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="input_ids" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="attention_mask" ),
) , jit_compile=a , )
def __UpperCamelCase ( self : Union[str, Any] , a : List[str] , a : Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model.generate(
input_ids=a , attention_mask=a , max_new_tokens=a , return_dict_in_generate=a , )
return {"sequences": outputs["sequences"]}
SCREAMING_SNAKE_CASE : Dict = [[2, 0], [102, 103]]
SCREAMING_SNAKE_CASE : Optional[int] = [[1, 0], [1, 1]]
SCREAMING_SNAKE_CASE : Any = DummyModel(model=a )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(a , a , signatures={"serving_default": dummy_model.serving} )
SCREAMING_SNAKE_CASE : Tuple = tf.saved_model.load(a ).signatures["serving_default"]
for batch_size in range(1 , len(a ) + 1 ):
SCREAMING_SNAKE_CASE : Optional[int] = {
"input_ids": tf.constant(dummy_input_ids[:batch_size] ),
"attention_mask": tf.constant(dummy_attention_masks[:batch_size] ),
}
SCREAMING_SNAKE_CASE : Dict = serving_func(**a )["sequences"]
SCREAMING_SNAKE_CASE : Union[str, Any] = test_model.generate(**a , max_new_tokens=a )
tf.debugging.assert_equal(a , a )
@slow
def __UpperCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE : int = 1
SCREAMING_SNAKE_CASE : Union[str, Any] = 2
class _UpperCamelCase ( tf.Module ):
'''simple docstring'''
def __init__( self : List[str] , a : List[Any] ) -> Optional[int]:
"""simple docstring"""
super(a , self ).__init__()
SCREAMING_SNAKE_CASE : List[Any] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="input_ids" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="attention_mask" ),
) , jit_compile=a , )
def __UpperCamelCase ( self : str , a : List[str] , a : str ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self.model.generate(
input_ids=a , attention_mask=a , max_new_tokens=a , return_dict_in_generate=a , )
return {"sequences": outputs["sequences"]}
SCREAMING_SNAKE_CASE : str = [[2], [102, 103]]
SCREAMING_SNAKE_CASE : str = [[1], [1, 1]]
SCREAMING_SNAKE_CASE : Optional[Any] = DummyModel(model=a )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(a , a , signatures={"serving_default": dummy_model.serving} )
SCREAMING_SNAKE_CASE : List[Any] = tf.saved_model.load(a ).signatures["serving_default"]
for input_row in range(len(a ) ):
SCREAMING_SNAKE_CASE : List[str] = {
"input_ids": tf.constant([dummy_input_ids[input_row]] ),
"attention_mask": tf.constant([dummy_attention_masks[input_row]] ),
}
SCREAMING_SNAKE_CASE : Union[str, Any] = serving_func(**a )["sequences"]
SCREAMING_SNAKE_CASE : str = test_model.generate(**a , max_new_tokens=a )
tf.debugging.assert_equal(a , a )
@slow
@require_tensorflow_text
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=a )
class _UpperCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE : List[str] = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(a , "spiece.model" ) , "rb" ).read() )
SCREAMING_SNAKE_CASE : str = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5" )
def __UpperCamelCase ( self : Union[str, Any] , a : Optional[Any] , *a : Union[str, Any] , **a : int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.tokenize(a )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = text.pad_model_inputs(
a , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
SCREAMING_SNAKE_CASE : str = self.model.generate(input_ids=a , attention_mask=a )
return self.tokenizer.detokenize(a )
SCREAMING_SNAKE_CASE : Union[str, Any] = CompleteSentenceTransformer()
SCREAMING_SNAKE_CASE : Optional[Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs" )
SCREAMING_SNAKE_CASE : Dict = complete_model(a )
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.keras.Model(a , a )
keras_model.save(a )
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = {
"do_sample": True,
"num_beams": 1,
"top_p": 0.7,
"top_k": 10,
"temperature": 0.7,
}
SCREAMING_SNAKE_CASE : Dict = 14
SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE : int = "Hello, my dog is cute and"
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(a , return_tensors="tf" )
SCREAMING_SNAKE_CASE : str = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE : Dict = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(":/CPU:0" ):
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = model.generate(**a , eos_token_id=a , **a )
self.assertTrue(expectation == len(generated_tokens[0] ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = [638, 198]
with tf.device(":/CPU:0" ):
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(**a , eos_token_id=a , **a )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart" )
SCREAMING_SNAKE_CASE : Optional[int] = "Hugging Face is a technology company based in New York and Paris."
SCREAMING_SNAKE_CASE : List[Any] = bart_tokenizer(a , return_tensors="tf" ).input_ids
SCREAMING_SNAKE_CASE : Dict = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart" )
SCREAMING_SNAKE_CASE : Dict = bart_model.generate(a ).numpy()
class _UpperCamelCase ( __A ):
'''simple docstring'''
def __UpperCamelCase ( self : Optional[int] , a : int , a : List[str]=None , **a : List[str] ) -> Optional[int]:
"""simple docstring"""
return super().call(a , **a )
SCREAMING_SNAKE_CASE : Optional[int] = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart" )
SCREAMING_SNAKE_CASE : Union[str, Any] = bart_model.generate(a , foo="bar" ).numpy()
self.assertTrue(np.array_equal(a , a ) )
class _UpperCamelCase ( bart_model.model.encoder.__class__ ):
'''simple docstring'''
def __UpperCamelCase ( self : Tuple , a : Any , **a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return super().call(a , **a )
SCREAMING_SNAKE_CASE : int = FakeEncoder(bart_model.config , bart_model.model.shared )
SCREAMING_SNAKE_CASE : List[str] = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
SCREAMING_SNAKE_CASE : Union[str, Any] = bart_model.generate(a ).numpy()
with self.assertRaises(a ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(a , foo="bar" )
| 25
|
from math import pi, sqrt, tan
def lowerCamelCase__ ( _a):
if side_length < 0:
raise ValueError("surface_area_cube() only accepts non-negative values")
return 6 * side_length**2
def lowerCamelCase__ ( _a , _a , _a):
if length < 0 or breadth < 0 or height < 0:
raise ValueError("surface_area_cuboid() only accepts non-negative values")
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowerCamelCase__ ( _a):
if radius < 0:
raise ValueError("surface_area_sphere() only accepts non-negative values")
return 4 * pi * radius**2
def lowerCamelCase__ ( _a):
if radius < 0:
raise ValueError("surface_area_hemisphere() only accepts non-negative values")
return 3 * pi * radius**2
def lowerCamelCase__ ( _a , _a):
if radius < 0 or height < 0:
raise ValueError("surface_area_cone() only accepts non-negative values")
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def lowerCamelCase__ ( _a , _a , _a):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
"surface_area_conical_frustum() only accepts non-negative values")
SCREAMING_SNAKE_CASE : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
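# Worked example for the frustum formula above (it matches the demo call
# further below): radii 10 and 20 with height 30 give
# slant = sqrt(30**2 + (10 - 20)**2) = sqrt(1000) ≈ 31.62, so the area is
# pi * (31.62 * (10 + 20) + 10**2 + 20**2) = pi * 1448.68 ≈ 4551.1.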
def lowerCamelCase__ ( _a , _a):
if radius < 0 or height < 0:
raise ValueError("surface_area_cylinder() only accepts non-negative values")
return 2 * pi * radius * (height + radius)
def lowerCamelCase__ ( _a , _a):
if torus_radius < 0 or tube_radius < 0:
raise ValueError("surface_area_torus() only accepts non-negative values")
if torus_radius < tube_radius:
raise ValueError(
"surface_area_torus() does not support spindle or self intersecting tori")
return 4 * pow(_a , 2) * torus_radius * tube_radius
def lowerCamelCase__ ( _a , _a):
if length < 0 or width < 0:
raise ValueError("area_rectangle() only accepts non-negative values")
return length * width
def lowerCamelCase__ ( _a):
if side_length < 0:
raise ValueError("area_square() only accepts non-negative values")
return side_length**2
def lowerCamelCase__ ( _a , _a):
if base < 0 or height < 0:
raise ValueError("area_triangle() only accepts non-negative values")
return (base * height) / 2
def lowerCamelCase__ ( _a , _a , _a):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError("area_triangle_three_sides() only accepts non-negative values")
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError("Given three sides do not form a triangle")
SCREAMING_SNAKE_CASE : List[str] = (sidea + sidea + sidea) / 2
SCREAMING_SNAKE_CASE : Optional[int] = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea))
return area
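# Worked example of Heron's formula as implemented above: for sides 5, 12, 13
# the semi-perimeter is (5 + 12 + 13) / 2 = 15, so the area is
# sqrt(15 * (15 - 5) * (15 - 12) * (15 - 13)) = sqrt(900) = 30, which is also
# (5 * 12) / 2 since 5-12-13 is a right triangle.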
def lowerCamelCase__ ( _a , _a):
if base < 0 or height < 0:
raise ValueError("area_parallelogram() only accepts non-negative values")
return base * height
def lowerCamelCase__ ( _a , _a , _a):
if basea < 0 or basea < 0 or height < 0:
raise ValueError("area_trapezium() only accepts non-negative values")
return 1 / 2 * (basea + basea) * height
def lowerCamelCase__ ( _a):
if radius < 0:
raise ValueError("area_circle() only accepts non-negative values")
return pi * radius**2
def lowerCamelCase__ ( _a , _a):
if radius_x < 0 or radius_y < 0:
raise ValueError("area_ellipse() only accepts non-negative values")
return pi * radius_x * radius_y
def lowerCamelCase__ ( _a , _a):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError("area_rhombus() only accepts non-negative values")
return 1 / 2 * diagonal_a * diagonal_a
def lowerCamelCase__ ( _a , _a):
if not isinstance(_a , _a) or sides < 3:
raise ValueError(
"area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides")
elif length < 0:
raise ValueError(
"area_reg_polygon() only accepts non-negative values as \
length of a side")
return (sides * length**2) / (4 * tan(pi / sides))
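# Worked example for the regular-polygon formula above: an equilateral
# triangle (3 sides of length 10) has area 3 * 10**2 / (4 * tan(pi / 3))
# = 300 / 6.9282 ≈ 43.30, agreeing with the direct formula
# sqrt(3) / 4 * 10**2 ≈ 43.30.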
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F'''Rectangle: {area_rectangle(10, 20) = }''')
print(F'''Square: {area_square(10) = }''')
print(F'''Triangle: {area_triangle(10, 10) = }''')
print(F'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(F'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(F'''Rhombus: {area_rhombus(10, 20) = }''')
print(F'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(F'''Circle: {area_circle(20) = }''')
print(F'''Ellipse: {area_ellipse(10, 20) = }''')
print('\nSurface Areas of various geometric shapes: \n')
print(F'''Cube: {surface_area_cube(20) = }''')
print(F'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(F'''Sphere: {surface_area_sphere(20) = }''')
print(F'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(F'''Cone: {surface_area_cone(10, 20) = }''')
print(F'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(F'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(F'''Torus: {surface_area_torus(20, 10) = }''')
print(F'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(F'''Square: {area_reg_polygon(4, 10) = }''')
print(F'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
| 25
| 1
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
a_ : List[Any] = logging.get_logger(__name__)
def __a ( __UpperCAmelCase , __UpperCAmelCase=False ):
a__ = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, strip the "vit." prefix from all keys that start with "vit"
a__ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def __a ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
a__ = ''''''
else:
a__ = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
a__ = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
a__ = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
a__ = in_proj_weight[
: config.hidden_size, :
]
a__ = in_proj_bias[: config.hidden_size]
a__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
a__ = in_proj_weight[
-config.hidden_size :, :
]
a__ = in_proj_bias[-config.hidden_size :]
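# Sketch (ours, hypothetical sizes) of the slicing performed above: timm stores
# the attention input projection as one fused matrix of shape
# (3 * hidden_size, hidden_size); rows [0, h) are the query weights,
# rows [h, 2h) the keys, and rows [2h, 3h) the values.
def _demo_split_fused_qkv(fused_weight, hidden_size):
    query = fused_weight[:hidden_size, :]
    key = fused_weight[hidden_size : hidden_size * 2, :]
    value = fused_weight[-hidden_size:, :]
    return query, key, value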
def __a ( __UpperCAmelCase ):
a__ = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def __a ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
a__ = dct.pop(__UpperCAmelCase )
a__ = val
def __a ( ):
a__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
a__ = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return im
@torch.no_grad()
def __a ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
a__ = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=__UpperCAmelCase , )
a__ = ViTHybridConfig(backbone_config=__UpperCAmelCase , image_size=384 , num_labels=1000 )
a__ = False
# load original model from timm
a__ = timm.create_model(__UpperCAmelCase , pretrained=__UpperCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
a__ = timm_model.state_dict()
if base_model:
remove_classification_head_(__UpperCAmelCase )
a__ = create_rename_keys(__UpperCAmelCase , __UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
read_in_q_k_v(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
a__ = '''huggingface/label-files'''
a__ = '''imagenet-1k-id2label.json'''
a__ = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
a__ = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
a__ = idalabel
a__ = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
a__ = ViTHybridModel(__UpperCAmelCase ).eval()
else:
a__ = ViTHybridForImageClassification(__UpperCAmelCase ).eval()
model.load_state_dict(__UpperCAmelCase )
# create image processor
a__ = create_transform(**resolve_data_config({} , model=__UpperCAmelCase ) )
a__ = transform.transforms
a__ = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
a__ = ViTHybridImageProcessor(
do_resize=__UpperCAmelCase , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__UpperCAmelCase , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=__UpperCAmelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
a__ = prepare_img()
a__ = transform(__UpperCAmelCase ).unsqueeze(0 )
a__ = processor(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase )
# verify logits
with torch.no_grad():
a__ = model(__UpperCAmelCase )
a__ = outputs.logits
print('''Predicted class:''' , logits.argmax(-1 ).item() )
if base_model:
a__ = timm_model.forward_features(__UpperCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__UpperCAmelCase , outputs.pooler_output , atol=1e-3 )
else:
a__ = timm_model(__UpperCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__UpperCAmelCase , outputs.logits , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__UpperCAmelCase )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(__UpperCAmelCase )
if push_to_hub:
print(f"Pushing model and processor to the hub {vit_name}" )
model.push_to_hub(f"ybelkada/{vit_name}" )
processor.push_to_hub(f"ybelkada/{vit_name}" )
if __name__ == "__main__":
a_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
a_ : Dict = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 711
|
from __future__ import annotations
def __a ( __UpperCAmelCase , __UpperCAmelCase ):
    a__ = get_failure_array(__UpperCAmelCase )  # 1) Build the failure array for the pattern
    # 2) Step through the text searching for the pattern
a__ , a__ = 0, 0 # index into text, pattern
while i < len(__UpperCAmelCase ):
if pattern[j] == text[i]:
if j == (len(__UpperCAmelCase ) - 1):
return True
j += 1
        # on a mismatch, fall back to the longest proper prefix of the
        # pattern that is also a suffix of what has been matched so far
elif j > 0:
a__ = failure[j - 1]
continue
i += 1
return False
def __a ( __UpperCAmelCase ):
a__ = [0]
a__ = 0
a__ = 1
while j < len(__UpperCAmelCase ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
a__ = failure[i - 1]
continue
j += 1
failure.append(__UpperCAmelCase )
return failure
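# Worked trace of the failure array computed above: for pattern "aabaabaaa",
# the longest proper prefix that is also a suffix at each position gives
# [0, 1, 0, 1, 2, 3, 4, 5, 2] (e.g. at the final "a", only the prefix "aa"
# matches the suffix "aa"), which is exactly what the assertion in the test
# block below checks.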
if __name__ == "__main__":
# Test 1)
a_ : Tuple = 'abc1abc12'
a_ : Optional[Any] = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
a_ : Optional[Any] = 'alskfjaldsk23adsfabcabc'
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
a_ : Any = 'ABABX'
a_ : Any = 'ABABZABABYABABX'
assert kmp(pattern, text)
# Test 3)
a_ : Union[str, Any] = 'AAAB'
a_ : int = 'ABAAAAAB'
assert kmp(pattern, text)
# Test 4)
a_ : Tuple = 'abcdabcy'
a_ : Optional[Any] = 'abcxabcdabxabcdabcdabcy'
assert kmp(pattern, text)
# Test 5)
a_ : Dict = 'aabaabaaa'
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 148
| 0
|
_snake_case : Any = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 693
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ =42
SCREAMING_SNAKE_CASE__ =42
def __init__( self, _a, _a ) -> Dict:
super().__init__()
self.register_modules(unet=_a, scheduler=_a )
@torch.no_grad()
def __call__( self, _a = 1, _a = 20_00, _a = None, _a = "pil", _a = True, **_a, ) -> Union[ImagePipelineOutput, Tuple]:
__SCREAMING_SNAKE_CASE = self.unet.config.sample_size
__SCREAMING_SNAKE_CASE = (batch_size, 3, img_size, img_size)
__SCREAMING_SNAKE_CASE = self.unet
__SCREAMING_SNAKE_CASE = randn_tensor(_a, generator=_a ) * self.scheduler.init_noise_sigma
__SCREAMING_SNAKE_CASE = sample.to(self.device )
self.scheduler.set_timesteps(_a )
self.scheduler.set_sigmas(_a )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__SCREAMING_SNAKE_CASE = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
__SCREAMING_SNAKE_CASE = self.unet(_a, _a ).sample
__SCREAMING_SNAKE_CASE = self.scheduler.step_correct(_a, _a, generator=_a ).prev_sample
# prediction step
__SCREAMING_SNAKE_CASE = model(_a, _a ).sample
__SCREAMING_SNAKE_CASE = self.scheduler.step_pred(_a, _a, _a, generator=_a )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = output.prev_sample, output.prev_sample_mean
__SCREAMING_SNAKE_CASE = sample_mean.clamp(0, 1 )
__SCREAMING_SNAKE_CASE = sample.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
__SCREAMING_SNAKE_CASE = self.numpy_to_pil(_a )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=_a )
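# Hedged usage sketch (not part of this file): the class above mirrors
# diffusers' ScoreSdeVePipeline, so assuming a score-SDE VE checkpoint such as
# "google/ncsnpp-church-256" is available, it would be driven roughly as:
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(batch_size=1, num_inference_steps=2000).images[0]
#   image.save("sde_ve_sample.png")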
| 693
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : Tuple = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[str] = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 180
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def __lowercase ( snake_case ):
"""simple docstring"""
if (
(cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f)
or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) #
or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) #
or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) #
or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) #
or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f)
or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) #
): #
return True
return False
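# Worked example for the range check above: ord("中") == 0x4E2D, which falls
# inside the basic CJK Unified Ideographs block [0x4E00, 0x9FFF], so the
# function returns True; ord("a") == 0x61 matches none of the blocks, so it
# returns False.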
def __lowercase ( snake_case ):
"""simple docstring"""
for char in word:
__magic_name__ :Union[str, Any] = ord(snake_case )
if not _is_chinese_char(snake_case ):
return 0
return 1
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Any = set()
for token in tokens:
__magic_name__ :Dict = len(snake_case ) > 1 and is_chinese(snake_case )
if chinese_word:
word_set.add(snake_case )
__magic_name__ :str = list(snake_case )
return word_list
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
__magic_name__ :int = max([len(snake_case ) for w in chinese_word_set] )
__magic_name__ :Any = bert_tokens
__magic_name__ , __magic_name__ :List[Any] = 0, len(snake_case )
while start < end:
__magic_name__ :str = True
if is_chinese(bert_word[start] ):
__magic_name__ :Optional[Any] = min(end - start, snake_case )
for i in range(snake_case, 1, -1 ):
__magic_name__ :Any = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
__magic_name__ :Optional[int] = '''##''' + bert_word[j]
__magic_name__ :List[str] = start + i
__magic_name__ :Optional[Any] = False
break
if single_word:
start += 1
return bert_word
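# Illustration (ours) of the rewriting above: with chinese_word_set = {"中国"}
# and bert_tokens = ["中", "国", "人"], the scan finds "中国" starting at
# index 0 and prefixes its continuation, yielding ["中", "##国", "人"], so
# whole-word masking can treat "中国" as a single unit.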
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Optional[int] = []
for i in range(0, len(snake_case ), 1_0_0 ):
__magic_name__ :List[str] = ltp_tokenizer.seg(lines[i : i + 1_0_0] )[0]
__magic_name__ :List[str] = [get_chinese_word(snake_case ) for r in res]
ltp_res.extend(snake_case )
assert len(snake_case ) == len(snake_case )
__magic_name__ :Union[str, Any] = []
for i in range(0, len(snake_case ), 1_0_0 ):
__magic_name__ :str = bert_tokenizer(lines[i : i + 1_0_0], add_special_tokens=snake_case, truncation=snake_case, max_length=5_1_2 )
bert_res.extend(res['''input_ids'''] )
assert len(snake_case ) == len(snake_case )
__magic_name__ :Union[str, Any] = []
for input_ids, chinese_word in zip(snake_case, snake_case ):
__magic_name__ :Any = []
for id in input_ids:
__magic_name__ :List[Any] = bert_tokenizer._convert_id_to_token(snake_case )
input_tokens.append(snake_case )
__magic_name__ :Dict = add_sub_symbol(snake_case, snake_case )
__magic_name__ :Optional[Any] = []
        # We only save the positions of Chinese subwords that start with "##", meaning they are part of a whole word.
for i, token in enumerate(snake_case ):
if token[:2] == "##":
__magic_name__ :Optional[int] = token[2:]
# save chinese tokens' pos
if len(snake_case ) == 1 and _is_chinese_char(ord(snake_case ) ):
ref_id.append(snake_case )
ref_ids.append(snake_case )
assert len(snake_case ) == len(snake_case )
return ref_ids
def __lowercase ( snake_case ):
"""simple docstring"""
with open(args.file_name, '''r''', encoding='''utf-8''' ) as f:
__magic_name__ :Union[str, Any] = f.readlines()
__magic_name__ :int = [line.strip() for line in data if len(snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
__magic_name__ :List[Any] = LTP(args.ltp ) # faster in GPU device
__magic_name__ :List[str] = BertTokenizer.from_pretrained(args.bert )
__magic_name__ :Optional[int] = prepare_ref(snake_case, snake_case, snake_case )
with open(args.save_path, '''w''', encoding='''utf-8''' ) as f:
__magic_name__ :Dict = [json.dumps(snake_case ) + '''\n''' for ref in ref_ids]
f.writelines(snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
main(args)
| 180
| 1
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
snake_case : List[str] = logging.getLogger(__name__)
@dataclass(frozen=a_)
class UpperCamelCase__ :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
@dataclass(frozen=a_)
class UpperCamelCase__ :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class UpperCamelCase__ ( a_):
"""simple docstring"""
__UpperCAmelCase = 42
def __init__( self : Any , UpperCamelCase_ : str , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : bool = False , ):
'''simple docstring'''
__magic_name__ = hans_processors[task]()
__magic_name__ = os.path.join(
UpperCamelCase_ , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(UpperCamelCase_ ) , UpperCamelCase_ , ) , )
__magic_name__ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__magic_name__ , __magic_name__ = label_list[2], label_list[1]
__magic_name__ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__magic_name__ = cached_features_file + '.lock'
with FileLock(UpperCamelCase_ ):
if os.path.exists(UpperCamelCase_ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
__magic_name__ = torch.load(UpperCamelCase_ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
__magic_name__ = (
processor.get_dev_examples(UpperCamelCase_ ) if evaluate else processor.get_train_examples(UpperCamelCase_ )
)
logger.info('Training examples: %s' , len(UpperCamelCase_ ) )
__magic_name__ = hans_convert_examples_to_features(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
logger.info('Saving features into cached file %s' , UpperCamelCase_ )
torch.save(self.features , UpperCamelCase_ )
def __len__( self : Optional[Any] ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : Dict , UpperCamelCase_ : Tuple ):
'''simple docstring'''
return self.features[i]
def a__ ( self : Tuple ):
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class UpperCamelCase__ :
"""simple docstring"""
__UpperCAmelCase = 42
def __init__( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] = 1_2_8 , UpperCamelCase_ : Dict=False , UpperCamelCase_ : bool = False , ):
'''simple docstring'''
__magic_name__ = hans_processors[task]()
__magic_name__ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__magic_name__ , __magic_name__ = label_list[2], label_list[1]
__magic_name__ = label_list
__magic_name__ = processor.get_dev_examples(UpperCamelCase_ ) if evaluate else processor.get_train_examples(UpperCamelCase_ )
__magic_name__ = hans_convert_examples_to_features(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 1_0_0_0_0 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(UpperCamelCase_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
__magic_name__ = tf.data.Dataset.from_generator(
UpperCamelCase_ , (
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def a__ ( self : Tuple ):
'''simple docstring'''
return self.dataset
def __len__( self : Dict ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : Dict , UpperCamelCase_ : int ):
'''simple docstring'''
return self.features[i]
def a__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.label_list
class UpperCamelCase__ ( a_):
"""simple docstring"""
def a__ ( self : List[Any] , UpperCamelCase_ : int ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(UpperCamelCase_ , 'heuristics_train_set.txt' ) ) , 'train' )
def a__ ( self : int , UpperCamelCase_ : Optional[int] ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(UpperCamelCase_ , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def a__ ( self : Any ):
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def a__ ( self : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict ):
'''simple docstring'''
__magic_name__ = []
for i, line in enumerate(UpperCamelCase_ ):
if i == 0:
continue
__magic_name__ = '%s-%s' % (set_type, line[0])
__magic_name__ = line[5]
__magic_name__ = line[6]
__magic_name__ = line[7][2:] if line[7].startswith('ex' ) else line[7]
__magic_name__ = line[0]
examples.append(InputExample(guid=UpperCamelCase_ , text_a=UpperCamelCase_ , text_b=UpperCamelCase_ , label=UpperCamelCase_ , pairID=UpperCamelCase_ ) )
return examples
def A ( __snake_case: List[InputExample] , __snake_case: List[str] , __snake_case: int , __snake_case: PreTrainedTokenizer , ) -> Any:
"""simple docstring"""
__magic_name__ = {label: i for i, label in enumerate(__snake_case )}
__magic_name__ = []
for ex_index, example in tqdm.tqdm(enumerate(__snake_case ) , desc='convert examples to features' ):
if ex_index % 1_0_0_0_0 == 0:
logger.info('Writing example %d' % (ex_index) )
__magic_name__ = tokenizer(
example.text_a , example.text_b , add_special_tokens=__snake_case , max_length=__snake_case , padding='max_length' , truncation=__snake_case , return_overflowing_tokens=__snake_case , )
__magic_name__ = label_map[example.label] if example.label in label_map else 0
__magic_name__ = int(example.pairID )
features.append(InputFeatures(**__snake_case , label=__snake_case , pairID=__snake_case ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F"""guid: {example}""" )
logger.info(F"""features: {features[i]}""" )
return features
snake_case : str = {
"""hans""": 3,
}
snake_case : List[str] = {
"""hans""": HansProcessor,
}
| 545
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=a_):
"""simple docstring"""
__UpperCAmelCase = ["""flax""", """transformers"""]
def __init__( self : List[Any] , *UpperCamelCase_ : str , **UpperCamelCase_ : Any ):
'''simple docstring'''
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def a__ ( cls : Optional[int] , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def a__ ( cls : str , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Dict ):
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
class UpperCamelCase__ ( metaclass=a_):
"""simple docstring"""
__UpperCAmelCase = ["""flax""", """transformers"""]
def __init__( self : List[Any] , *UpperCamelCase_ : str , **UpperCamelCase_ : str ):
'''simple docstring'''
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def a__ ( cls : Optional[int] , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : List[str] ):
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def a__ ( cls : Tuple , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
class UpperCamelCase__ ( metaclass=a_):
"""simple docstring"""
__UpperCAmelCase = ["""flax""", """transformers"""]
def __init__( self : Optional[int] , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : List[str] ):
'''simple docstring'''
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def a__ ( cls : str , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def a__ ( cls : Tuple , *UpperCamelCase_ : Any , **UpperCamelCase_ : List[str] ):
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
class UpperCamelCase__ ( metaclass=a_):
"""simple docstring"""
__UpperCAmelCase = ["""flax""", """transformers"""]
def __init__( self : Optional[Any] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Dict ):
'''simple docstring'''
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def a__ ( cls : str , *UpperCamelCase_ : Dict , **UpperCamelCase_ : List[str] ):
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def a__ ( cls : str , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
| 545
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : List[Any] ={
"""configuration_clap""": [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapAudioConfig""",
"""ClapConfig""",
"""ClapTextConfig""",
],
"""processing_clap""": ["""ClapProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] =[
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapModel""",
"""ClapPreTrainedModel""",
"""ClapTextModel""",
"""ClapTextModelWithProjection""",
"""ClapAudioModel""",
"""ClapAudioModelWithProjection""",
]
_lowerCamelCase : Optional[int] =["""ClapFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
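# The `_LazyModule` assignment above swaps this module out of `sys.modules` so
# that heavy submodules are only imported on first attribute access. A minimal
# sketch of the same idea using only the stdlib and PEP 562's module-level
# `__getattr__` (the `_LAZY_ATTRS` helper below is illustrative, not part of
# the transformers API):
#
#     import importlib
#
#     _LAZY_ATTRS = {"ClapProcessor": ".processing_clap"}  # attribute -> submodule
#
#     def __getattr__(name):
#         if name in _LAZY_ATTRS:  # import the submodule only when requested
#             module = importlib.import_module(_LAZY_ATTRS[name], __name__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")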
| 706
|
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
| 308
| 0
|
def bead_sort(sequence: list ) -> list:
    '''simple docstring'''
    if any(not isinstance(x , int ) or x < 0 for x in sequence ):
        raise TypeError('''Sequence must be list of non-negative integers''' )
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
            if rod_upper > rod_lower:
                # "gravity": surplus beads fall from the taller rod onto its right neighbour
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
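    # Worked example of a single "gravity" step from the inner loop above:
    # with rods [4, 1], rod 0 is taller than rod 1, so 4 - 1 = 3 beads fall
    # onto the right-hand rod, giving [1, 4]; repeated passes sort the list.
    example = [4, 1]
    diff = example[0] - example[1]  # 3 surplus beads
    example[0] -= diff
    example[1] += diff
    assert example == [1, 4]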
| 35
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_groupvit'] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_groupvit'] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35
| 1
|
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_lowerCAmelCase = "\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n"
_lowerCAmelCase = "\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results['pearsonr'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n ['p-value', 'pearsonr']\n >>> print(round(results['pearsonr'], 2))\n -0.74\n >>> print(round(results['p-value'], 2))\n 0.15\n"
_lowerCAmelCase = "\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
"""simple docstring"""
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
    def _compute(self , predictions , references , return_pvalue=False ):
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
| 480
|
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    def test_swish(self ):
        act = get_activation("swish" )
        self.assertIsInstance(act , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_silu(self ):
        act = get_activation("silu" )
        self.assertIsInstance(act , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_mish(self ):
        act = get_activation("mish" )
        self.assertIsInstance(act , nn.Mish )
        self.assertEqual(act(torch.tensor(-200 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_gelu(self ):
        act = get_activation("gelu" )
        self.assertIsInstance(act , nn.GELU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
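# For context, `get_activation` is essentially a string -> nn.Module factory.
# A minimal sketch of such a registry (the mapping below is illustrative, not
# diffusers' actual lookup table):
_ACT2CLS = {"swish": nn.SiLU, "silu": nn.SiLU, "mish": nn.Mish, "gelu": nn.GELU}

def _get_activation_sketch(name: str) -> nn.Module:
    try:
        return _ACT2CLS[name]()  # instantiate a fresh module on each call
    except KeyError:
        raise ValueError(f"Unsupported activation function: {name}")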
| 480
| 1
|
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt , class_data_dir , num_class_images ):
    '''simple docstring'''
    factor = 1.5
    num_images = int(factor * num_class_images )
    client = ClipClient(
        url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 )
    os.makedirs(F'{class_data_dir}/images' , exist_ok=True )
    if len(list(Path(F'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 , )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images" , total=num_class_images )
    with open(F'{class_data_dir}/caption.txt' , "w" ) as f1, open(F'{class_data_dir}/urls.txt' , "w" ) as f2, open(
        F'{class_data_dir}/images.txt' , "w" ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"] )
                if img.status_code == 200:
                    Image.open(BytesIO(img.content ) )  # validate that the payload decodes as an image
                    with open(F'{class_data_dir}/images/{total}.jpg' , "wb" ) as f:
                        f.write(img.content )
                    f1.write(images["caption"] + "\n" )
                    f2.write(images["url"] + "\n" )
                    f3.write(F'{class_data_dir}/images/{total}.jpg' + "\n" )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser("" , add_help=False )
    parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=True , type=str )
    parser.add_argument("--class_data_dir" , help="path to save images" , required=True , type=str )
    parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
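# A typical invocation of this script (the prompt and target directory are
# illustrative):
#
#   python retrieve.py --class_prompt "photo of a cat" \
#       --class_data_dir ./real_reg/cat --num_class_images 200
#
# It fills class_data_dir/images with the downloads and records the matching
# captions, URLs and file paths in caption.txt, urls.txt and images.txt.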
| 664
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 664
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
    def setUp(self ):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
        image_processor_map = {
            '''do_resize''': True,
            '''size''': 2_0,
            '''do_center_crop''': True,
            '''crop_size''': 1_8,
            '''do_normalize''': True,
            '''image_mean''': [0.48145466, 0.4578275, 0.40821073],
            '''image_std''': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer(self , **kwargs ):
        """simple docstring"""
        return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **kwargs )
    def get_rust_tokenizer(self , **kwargs ):
        """simple docstring"""
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **kwargs )
    def get_image_processor(self , **kwargs ):
        """simple docstring"""
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown(self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self ):
        """simple docstring"""
        image_inputs = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.get_tokenizer()
__magic_name__ :Optional[int] = self.get_rust_tokenizer()
__magic_name__ :List[str] = self.get_image_processor()
__magic_name__ :Optional[Any] = OwlViTProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
__magic_name__ :Union[str, Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase )
__magic_name__ :str = OwlViTProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
__magic_name__ :str = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :int = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__magic_name__ :Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__magic_name__ :Tuple = self.get_image_processor(do_normalize=__lowerCAmelCase )
__magic_name__ :Optional[int] = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :int = self.get_image_processor()
__magic_name__ :str = self.get_tokenizer()
__magic_name__ :List[Any] = OwlViTProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
__magic_name__ :int = self.prepare_image_inputs()
__magic_name__ :Dict = image_processor(__lowerCAmelCase , return_tensors='''np''' )
__magic_name__ :List[Any] = processor(images=__lowerCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def A ( self ):
"""simple docstring"""
__magic_name__ :int = self.get_image_processor()
__magic_name__ :str = self.get_tokenizer()
__magic_name__ :Tuple = OwlViTProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
__magic_name__ :str = '''lower newer'''
__magic_name__ :Any = processor(text=__lowerCAmelCase , return_tensors='''np''' )
__magic_name__ :Dict = tokenizer(__lowerCAmelCase , return_tensors='''np''' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def A ( self ):
"""simple docstring"""
__magic_name__ :str = self.get_image_processor()
__magic_name__ :Dict = self.get_tokenizer()
__magic_name__ :Optional[int] = OwlViTProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
__magic_name__ :Tuple = '''lower newer'''
__magic_name__ :Tuple = self.prepare_image_inputs()
__magic_name__ :Any = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def A ( self ):
"""simple docstring"""
__magic_name__ :str = '''google/owlvit-base-patch32'''
__magic_name__ :int = OwlViTProcessor.from_pretrained(__lowerCAmelCase )
__magic_name__ :List[str] = ['''cat''', '''nasa badge''']
__magic_name__ :Optional[int] = processor(text=__lowerCAmelCase )
__magic_name__ :List[Any] = 1_6
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = '''google/owlvit-base-patch32'''
__magic_name__ :int = OwlViTProcessor.from_pretrained(__lowerCAmelCase )
__magic_name__ :List[Any] = [['''cat''', '''nasa badge'''], ['''person''']]
__magic_name__ :List[str] = processor(text=__lowerCAmelCase )
__magic_name__ :List[Any] = 1_6
__magic_name__ :Union[str, Any] = len(__lowerCAmelCase )
__magic_name__ :int = max([len(__lowerCAmelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = '''google/owlvit-base-patch32'''
__magic_name__ :Tuple = OwlViTProcessor.from_pretrained(__lowerCAmelCase )
__magic_name__ :List[Any] = ['''cat''', '''nasa badge''']
__magic_name__ :Optional[Any] = processor(text=__lowerCAmelCase )
__magic_name__ :int = 1_6
__magic_name__ :Any = inputs['''input_ids''']
__magic_name__ :Optional[int] = [
[4_9_4_0_6, 2_3_6_8, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9_4_0_6, 6_8_4_1, 1_1_3_0_1, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = self.get_image_processor()
__magic_name__ :Optional[int] = self.get_tokenizer()
__magic_name__ :Optional[Any] = OwlViTProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
__magic_name__ :Tuple = self.prepare_image_inputs()
__magic_name__ :Optional[Any] = self.prepare_image_inputs()
__magic_name__ :Any = processor(images=__lowerCAmelCase , query_images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = self.get_image_processor()
__magic_name__ :Dict = self.get_tokenizer()
__magic_name__ :List[str] = OwlViTProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
__magic_name__ :Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__magic_name__ :Dict = processor.batch_decode(__lowerCAmelCase )
__magic_name__ :Dict = tokenizer.batch_decode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
| 180
|
from __future__ import annotations
def two_pointer(nums: list , target: int ) -> list:
    """simple docstring"""
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 180
| 1
|
"""simple docstring"""
import base64
def base85_encode(string: str ) -> bytes:
    # encode the input string to UTF-8 bytes, then Base85-encode those bytes
    return base64.b85encode(string.encode("utf-8" ) )
def base85_decode(b85encoded: bytes ) -> str:
    return base64.b85decode(b85encoded ).decode("utf-8" )
if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test )
    print(encoded )
    decoded = base85_decode(encoded )
    print(decoded )
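    # Round-trip sanity check for the helpers above: decoding an encoding must
    # reproduce the input (the exact Base85 bytes depend on the string).
    for sample in ("Hello World!", "transformers", ""):
        assert base85_decode(base85_encode(sample)) == sample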
| 134
|
"""simple docstring"""
def is_ip_v4_address_valid(ip_v4_address: str ) -> bool:
    # a valid IPv4 address has exactly four dot-separated numeric octets, each 0-255
    octets = [int(i ) for i in ip_v4_address.split("." ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= octet <= 255 for octet in octets )
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip ) else "invalid"
    print(f'{ip} is a {valid_or_invalid} IP v4 address.')
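    # A few concrete checks for the validator above:
    assert is_ip_v4_address_valid("192.168.0.23")       # four in-range octets
    assert not is_ip_v4_address_valid("192.168.256.1")  # 256 is out of range
    assert not is_ip_v4_address_valid("1.2.3")          # only three octets
    assert not is_ip_v4_address_valid("1.2.3.four")     # non-numeric octet is dropped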
| 134
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool( PipelineTool ):
    """simple docstring"""
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["text", ["text"]]
    outputs = ["text"]
    def setup(self ):
        '''simple docstring'''
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail' ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
    def encode(self , text , labels ):
        '''simple docstring'''
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [F'This example is {label}' for label in labels] , return_tensors='pt' , padding='max_length' , )
    def decode(self , outputs ):
        '''simple docstring'''
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
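# A hedged usage sketch for the tool above (the call signature follows the
# generic PipelineTool pattern; the sample text and labels are illustrative):
#
#     classifier = TextClassificationTool()
#     label = classifier("This movie was a masterpiece.", ["positive", "negative"])
#     print(label)  # expected: "positive"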
| 630
|
'''simple docstring'''
import socket
def main():
    """simple docstring"""
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 1_2_3_1_2
    sock.connect((host, port) )
    sock.send(b'Hello server!' )
    with open('Received_file' , 'wb' ) as out_file:
        print('File opened' )
        print('Receiving data...' )
        while True:
            data = sock.recv(1_0_2_4 )
            if not data:
                break
            out_file.write(data )
    print('Successfully received the file' )
    sock.close()
    print('Connection closed' )
if __name__ == "__main__":
    main()
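# For reference, a matching sender could look like this sketch: a server that
# accepts one connection and streams a local file back to the client. The file
# name 'File_to_send' is illustrative, not part of the original script.
def serve_file(path='File_to_send', port=1_2_3_1_2):
    server = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    server.bind((socket.gethostname(), port) )
    server.listen(1 )
    conn, _addr = server.accept()
    conn.recv(1_0_2_4 )  # consume the client's greeting
    with open(path , 'rb' ) as in_file:
        while True:
            chunk = in_file.read(1_0_2_4 )
            if not chunk:
                break
            conn.send(chunk )
    conn.close()
    server.close()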
| 630
| 1
|
'''simple docstring'''
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 531
|
'''simple docstring'''
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
lowerCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def try_infer_format_from_ext(path ):
    '''simple docstring'''
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext ):
            return ext
    raise Exception(
        F"""Unable to determine file format from file extension {path}. """
        F"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def run_command_factory(args ):
    '''simple docstring'''
    nlp = pipeline(
        task=args.task ,model=args.model if args.model else None ,config=args.config ,tokenizer=args.tokenizer ,device=args.device ,)
    format = try_infer_format_from_ext(args.input ) if args.format == 'infer' else args.format
    reader = PipelineDataFormat.from_str(
        format=format ,output_path=args.output ,input_path=args.input ,column=args.column if args.column else nlp.default_input_names ,overwrite=args.overwrite ,)
    return RunCommand(nlp ,reader )
class RunCommand( BaseTransformersCLICommand ):
    def __init__(self , nlp : Pipeline , reader : PipelineDataFormat ):
        """simple docstring"""
        self._nlp = nlp
        self._reader = reader
    @staticmethod
    def register_subcommand(parser : ArgumentParser ):
        """simple docstring"""
        run_parser = parser.add_parser('run' , help='Run a pipeline through the CLI' )
        run_parser.add_argument('--task' , choices=get_supported_tasks() , help='Task to run' )
        run_parser.add_argument('--input' , type=str , help='Path to the file to use for inference' )
        run_parser.add_argument('--output' , type=str , help='Path to the file that will be used post to write results.' )
        run_parser.add_argument('--model' , type=str , help='Name or path to the model to instantiate.' )
        run_parser.add_argument('--config' , type=str , help='Name or path to the model\'s config to instantiate.' )
        run_parser.add_argument(
            '--tokenizer' , type=str , help='Name of the tokenizer to use. (default: same as the model name)' )
        run_parser.add_argument(
            '--column' , type=str , help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)' , )
        run_parser.add_argument(
            '--format' , type=str , default='infer' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='Input format to read from' , )
        run_parser.add_argument(
            '--device' , type=int , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
        run_parser.add_argument('--overwrite' , action='store_true' , help='Allow overwriting the output file.' )
        run_parser.set_defaults(func=run_command_factory )
    def run(self ):
        """simple docstring"""
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry ) if self._reader.is_multi_columns else nlp(entry )
            if isinstance(output , dict ):
                outputs.append(output )
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs )
            logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
        else:
            self._reader.save(outputs )
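# Once registered, the command is reachable through the transformers CLI, e.g.
# (task and file names are illustrative):
#
#   transformers-cli run --task text-classification --input reviews.csv \
#       --column text --output predictions.json
#
# With no --input, data is read from stdin ("pipe"); with --format infer, the
# reader is chosen from the file extension via try_infer_format_from_ext.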
| 531
| 1
|
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset , expected_features ):
    """simple docstring"""
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_dataset_from_sql_keep_in_memory(keep_in_memory , sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset" , "sqlite:///" + sqlite_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_sql_dataset(dataset , expected_features )
@require_sqlalchemy
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_dataset_from_sql_features(features , sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , features=features , cache_dir=cache_dir ).read()
    _check_sql_dataset(dataset , expected_features )
def iter_sql_file(sqlite_path ):
    """simple docstring"""
    with contextlib.closing(sqlite3.connect(sqlite_path ) ) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset" )
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir , "tmp.sql" )
    dataset = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=cache_dir ).read()
    SqlDatasetWriter(dataset , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=1 ).write()
    original_sql = iter_sql_file(sqlite_path )
    expected_sql = iter_sql_file(output_sqlite_path )
    for row1, row2 in zip(original_sql , expected_sql ):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir , "tmp.sql" )
    dataset = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=cache_dir ).read()
    SqlDatasetWriter(dataset , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=2 ).write()
    original_sql = iter_sql_file(sqlite_path )
    expected_sql = iter_sql_file(output_sqlite_path )
    for row1, row2 in zip(original_sql , expected_sql ):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir , "tmp.sql" )
    dataset = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=cache_dir ).read()
    with pytest.raises(ValueError ):
        SqlDatasetWriter(dataset , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=0 ).write()
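# The reader/writer pair exercised above backs the public Dataset API; a
# minimal sketch of the same round trip (the database path is illustrative):
#
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#     ds.to_sql("dataset", "sqlite:///tmp.db")                # SqlDatasetWriter
#     back = Dataset.from_sql("dataset", "sqlite:///tmp.db")  # SqlDatasetReader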
| 589
|
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field(default=None , metadata=None ):
    """simple docstring"""
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    freeze_feature_extractor: Optional[bool] = field(
        default=True , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    attention_dropout: Optional[float] = field(
        default=0.1 , metadata={"help": "The dropout ratio for the attention probabilities."} )
    activation_dropout: Optional[float] = field(
        default=0.1 , metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
    hidden_dropout: Optional[float] = field(
        default=0.1 , metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        } , )
    feat_proj_dropout: Optional[float] = field(
        default=0.1 , metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."} , )
    mask_time_prob: Optional[float] = field(
        default=0.05 , metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector"
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        } , )
    layerdrop: Optional[float] = field(default=0.0 , metadata={"help": "The LayerDrop probability."} )
@dataclass
class DataTrainingArguments:
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_split_name: Optional[str] = field(
        default="train+validation" , metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"help": "The number of processes to use for the preprocessing."} , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_val_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        } , )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"] , metadata={"help": "A list of characters to remove from the transcripts."} , )
@dataclass
class DataCollatorCTCWithPadding:
    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None
    def __call__( self , features ) -> Dict[str, torch.Tensor]:
        """simple docstring"""
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]
        batch = self.processor.pad(
            input_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
        labels_batch = self.processor.pad(
            labels=label_features , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="pt" , )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_00 )
        batch["labels"] = labels
        return batch
class CTCTrainer( Trainer ):
    def training_step( self , model , inputs ) -> torch.Tensor:
        """simple docstring"""
        model.train()
        inputs = self._prepare_inputs(inputs )
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs )
        else:
            loss = self.compute_loss(model , inputs )
        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()
        return loss.detach()
def _lowercase ( ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , a__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
_UpperCamelCase = datasets.load_dataset(
"common_voice" , data_args.dataset_config_name , split=data_args.train_split_name )
_UpperCamelCase = datasets.load_dataset("common_voice" , data_args.dataset_config_name , split="test" )
# Create and save tokenizer
_UpperCamelCase = f'''[{"".join(data_args.chars_to_ignore )}]'''
def remove_special_characters(a__ : Tuple ):
_UpperCamelCase = re.sub(a__ , "" , batch["sentence"] ).lower() + " "
return batch
_UpperCamelCase = train_dataset.map(a__ , remove_columns=["sentence"] )
_UpperCamelCase = eval_dataset.map(a__ , remove_columns=["sentence"] )
def extract_all_chars(a__ : Tuple ):
_UpperCamelCase = " ".join(batch["text"] )
_UpperCamelCase = list(set(a__ ) )
return {"vocab": [vocab], "all_text": [all_text]}
_UpperCamelCase = train_dataset.map(
a__ , batched=a__ , batch_size=-1 , keep_in_memory=a__ , remove_columns=train_dataset.column_names , )
_UpperCamelCase = train_dataset.map(
a__ , batched=a__ , batch_size=-1 , keep_in_memory=a__ , remove_columns=eval_dataset.column_names , )
_UpperCamelCase = list(set(vocab_train["vocab"][0] ) | set(vocab_test["vocab"][0] ) )
_UpperCamelCase = {v: k for k, v in enumerate(a__ )}
_UpperCamelCase = vocab_dict[" "]
del vocab_dict[" "]
_UpperCamelCase = len(a__ )
_UpperCamelCase = len(a__ )
with open("vocab.json" , "w" ) as vocab_file:
json.dump(a__ , a__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = WavaVecaCTCTokenizer(
"vocab.json" , unk_token="[UNK]" , pad_token="[PAD]" , word_delimiter_token="|" , )
_UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0.0 , do_normalize=a__ , return_attention_mask=a__ )
_UpperCamelCase = WavaVecaProcessor(feature_extractor=a__ , tokenizer=a__ )
_UpperCamelCase = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="mean" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
_UpperCamelCase = min(len(a__ ) , data_args.max_train_samples )
_UpperCamelCase = train_dataset.select(range(a__ ) )
if data_args.max_val_samples is not None:
_UpperCamelCase = eval_dataset.select(range(data_args.max_val_samples ) )
_UpperCamelCase = torchaudio.transforms.Resample(4_80_00 , 1_60_00 )
# Preprocessing the datasets.
# We need to read the aduio files as arrays and tokenize the targets.
def speech_file_to_array_fn(a__ : List[Any] ):
_UpperCamelCase , _UpperCamelCase = torchaudio.load(batch["path"] )
_UpperCamelCase = resampler(a__ ).squeeze().numpy()
_UpperCamelCase = 1_60_00
_UpperCamelCase = batch["text"]
return batch
_UpperCamelCase = train_dataset.map(
a__ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
a__ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(a__ : Dict ):
# check that all files have the correct sampling rate
assert (
len(set(batch["sampling_rate"] ) ) == 1
), f'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'''
_UpperCamelCase = processor(
audio=batch["speech"] , text=batch["target_text"] , sampling_rate=batch["sampling_rate"][0] )
batch.update(a__ )
return batch
_UpperCamelCase = train_dataset.map(
a__ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=a__ , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
a__ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=a__ , num_proc=data_args.preprocessing_num_workers , )
# Metric
_UpperCamelCase = datasets.load_metric("wer" )
def compute_metrics(a__ : Union[str, Any] ):
_UpperCamelCase = pred.predictions
_UpperCamelCase = np.argmax(a__ , axis=-1 )
_UpperCamelCase = processor.tokenizer.pad_token_id
_UpperCamelCase = processor.batch_decode(a__ )
# we do not want to group tokens when computing the metrics
_UpperCamelCase = processor.batch_decode(pred.label_ids , group_tokens=a__ )
_UpperCamelCase = wer_metric.compute(predictions=a__ , references=a__ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
_UpperCamelCase = DataCollatorCTCWithPadding(processor=a__ , padding=a__ )
# Initialize our Trainer
_UpperCamelCase = CTCTrainer(
model=a__ , data_collator=a__ , args=a__ , compute_metrics=a__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCamelCase = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
_UpperCamelCase = model_args.model_name_or_path
else:
_UpperCamelCase = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
_UpperCamelCase = trainer.train(resume_from_checkpoint=a__ )
trainer.save_model()
_UpperCamelCase = train_result.metrics
_UpperCamelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(a__ )
)
_UpperCamelCase = min(a__ , len(a__ ) )
trainer.log_metrics("train" , a__ )
trainer.save_metrics("train" , a__ )
trainer.save_state()
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(a__ )
_UpperCamelCase = min(a__ , len(a__ ) )
trainer.log_metrics("eval" , a__ )
trainer.save_metrics("eval" , a__ )
return results
if __name__ == "__main__":
main()
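# A typical launch of this fine-tuning script (the script name, checkpoint and
# language config are illustrative; any XLSR checkpoint and Common Voice
# config follow the same pattern):
#
#   python run_common_voice.py \
#       --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#       --dataset_config_name tr \
#       --output_dir ./wav2vec2-xlsr-tr \
#       --do_train --do_eval
#
# The dataclasses above are consumed by HfArgumentParser, so every field
# (dropout rates, mask_time_prob, max_train_samples, ...) doubles as a
# command-line flag of the same name.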
| 589
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/xglm-564M': 2_0_4_8,
}
class XGLMTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs=None , **kwargs , ) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [F"<madeupword{i}>" for i in range(self.num_madeup_words )]
        kwargs["additional_special_tokens"] = kwargs.get("""additional_special_tokens""" , [] )
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        sp_size = len(self.sp_model )
        madeup_words_ids = {F"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
        self.fairseq_tokens_to_ids.update(madeup_words_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ) -> Dict:
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 ))
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 ))
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0 ) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1 ) * [0]
    @property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
    def get_vocab( self ):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ):
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
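# The alignment table in the comments inside __init__ boils down to a constant
# shift: a SentencePiece id maps to the fairseq id `spm_id + fairseq_offset`,
# with the first four ids pinned to control tokens. A tiny standalone sketch of
# that arithmetic, mirroring _convert_token_to_id above:
_FAIRSEQ_OFFSET = 1
_PINNED = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
def _spm_to_fairseq(spm_id: int) -> int:
    # spm id 0 is <unk>, which maps to the tokenizer's own unk id instead
    return spm_id + _FAIRSEQ_OFFSET if spm_id else _PINNED['<unk>']
assert _spm_to_fairseq(3) == 4  # the first "real" spm token ',' lands on fairseq id 4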
| 301
|
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None
try:
    import msvcrt
except ImportError:
    msvcrt = None
try:
    import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    """Timeout""",
    """BaseFileLock""",
    """WindowsFileLock""",
    """UnixFileLock""",
    """SoftFileLock""",
    """FileLock""",
]
__version__ = """3.0.12"""
_logger = None
def logger():
    """simple docstring"""
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class Timeout( TimeoutError ):
    """simple docstring"""
    def __init__( self , lock_file ):
        """simple docstring"""
        self.lock_file = lock_file
        return None
    def __str__( self ):
        """simple docstring"""
        temp = F'''The file lock '{self.lock_file}' could not be acquired.'''
        return temp
class _Acquire_ReturnProxy:
    """simple docstring"""
    def __init__( self , lock ):
        """simple docstring"""
        self.lock = lock
        return None
    def __enter__( self ):
        """simple docstring"""
        return self.lock
    def __exit__( self , exc_type , exc_value , traceback ):
        """simple docstring"""
        self.lock.release()
        return None
class BaseFileLock:
    """simple docstring"""
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        """simple docstring"""
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
@property
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
return self._lock_file
@property
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
return self._timeout
@timeout.setter
def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = float(_A )
return None
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
raise NotImplementedError()
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
raise NotImplementedError()
@property
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        """Acquire the lock, blocking up to *timeout* seconds, polling every *poll_intervall* seconds."""
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        """Release the lock; with *force* the nested lock counter is ignored."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses msvcrt.locking() to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses fcntl.flock() to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
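# Example usage (added for illustration; file names below are placeholders). The
# FileLock alias above resolves to the strongest mechanism available on this platform,
# and acquire() returns a proxy so the lock can be scoped with `with` and is released
# automatically even if the body raises.
if __name__ == "__main__":
    demo_lock = FileLock("demo.txt.lock", timeout=10)
    with demo_lock.acquire(poll_intervall=0.1):
        with open("demo.txt", "a") as f:
            f.write("guarded write\n")
    print("lock released:", not demo_lock.is_locked)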
| 74
| 0
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
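# Example invocation (added for illustration; the script filename and checkpoint path
# are placeholders for a locally downloaded Pegasus TF checkpoint):
#
#     python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc
#
# The `if args.save_dir is None` branch above derives the output directory from the
# checkpoint's parent folder name, e.g. "pegasus/aeslc".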
| 702
|
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y == gcd(a, b).

    >>> extended_euclid(10, 6)
    (-1, 2)
    """
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Combine x = r1 (mod n1) and x = r2 (mod n2) for coprime n1, n2.

    >>> chinese_remainder_theorem(5, 1, 7, 3)
    31
    """
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return b such that a*b = 1 (mod n), assuming gcd(a, n) == 1.

    >>> invert_modulo(2, 3)
    2
    """
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same result as chinese_remainder_theorem, built from modular inverses.

    >>> chinese_remainder_theorem2(5, 1, 7, 3)
    31
    """
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 419
| 0
|
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Save the best model checkpoints by the chosen validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
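# A minimal wiring sketch (added for illustration, not from the original file): these
# callbacks attach to a pytorch_lightning Trainer, and the LightningModule is assumed
# to expose `hparams.output_dir`, `metrics`, and `metrics_save_path` as used above.
#
#     trainer = pl.Trainer(
#         callbacks=[
#             Seq2SeqLoggingCallback(),
#             get_checkpoint_callback(output_dir="outputs", metric="rouge2"),
#             get_early_stopping_callback(metric="rouge2", patience=3),
#         ],
#     )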
| 553
|
"""simple docstring"""
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """Return the least cuboid size M such that more than *limit* cuboids with
    integer shortest path fit within an M x M x M bound.

    The shortest surface path over an a x b x c cuboid (c the longest side) has
    length sqrt((a + b) ** 2 + c ** 2), so for each c we count the (a, b) pairs
    whose sum makes that expression a perfect square.
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
| 553
| 1
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
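# Usage sketch (added for illustration): `load_dataset("audiofolder", ...)` resolves to
# this builder and infers class labels from directory names, e.g.
#
#     data_dir/cat/1.wav
#     data_dir/dog/2.wav
#
#     from datasets import load_dataset
#     ds = load_dataset("audiofolder", data_dir="data_dir")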
| 707
|
def capitalize_each_letter_once(txt: str) -> list:
    """Return copies of *txt* with each alphabetic character uppercased in turn.

    (Function name chosen descriptively for this rewrite.)

    >>> capitalize_each_letter_once("abc")
    ['Abc', 'aBc', 'abC']
    >>> capitalize_each_letter_once("a1b")
    ['A1b', 'a1B']
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("doctest").testmod()
| 57
| 0
|
import math
import time

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
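# A minimal wiring sketch (added for illustration, not from the original file): the
# subclass is used like a regular `Trainer`, with `eval_examples` holding the raw
# (un-tokenized) examples and `post_process_function` mapping raw predictions back to
# answer spans. The argument values below are placeholders.
#
#     trainer = QuestionAnsweringTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         post_process_function=post_processing_function,
#         compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate()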
| 0
|
from sklearn.metrics import matthews_corrcoef
import datasets
SCREAMING_SNAKE_CASE__ : Optional[Any] = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results['matthews_correlation'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results['matthews_correlation'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results['matthews_correlation'], 2))
-0.25
"""
SCREAMING_SNAKE_CASE__ : int = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
| 0
| 1
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=2 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=10 , _UpperCAmelCase=3 , _UpperCAmelCase=32 * 4 , _UpperCAmelCase=32 * 6 , _UpperCAmelCase=4 , _UpperCAmelCase=32 , ) -> List[str]:
snake_case__ =parent
snake_case__ =batch_size
snake_case__ =is_training
snake_case__ =use_auxiliary_loss
snake_case__ =num_queries
snake_case__ =num_channels
snake_case__ =min_size
snake_case__ =max_size
snake_case__ =num_labels
snake_case__ =mask_feature_size
def _lowercase ( self ) -> Optional[int]:
snake_case__ =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_UpperCAmelCase )
snake_case__ =torch.ones([self.batch_size, self.min_size, self.max_size] , device=_UpperCAmelCase )
snake_case__ =(
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_UpperCAmelCase ) > 0.5
).float()
snake_case__ =(torch.rand((self.batch_size, self.num_labels) , device=_UpperCAmelCase ) > 0.5).long()
snake_case__ =self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _lowercase ( self ) -> List[Any]:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def _lowercase ( self ) -> Optional[int]:
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ =self.prepare_config_and_inputs()
snake_case__ ={'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
snake_case__ =output.encoder_hidden_states
snake_case__ =output.pixel_decoder_hidden_states
snake_case__ =output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCAmelCase ) , config.decoder_config.decoder_layers )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Union[str, Any]:
with torch.no_grad():
snake_case__ =MaskFormerModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case__ =model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase )
snake_case__ =model(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_UpperCAmelCase , _UpperCAmelCase )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str:
snake_case__ =MaskFormerForInstanceSegmentation(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
def comm_check_on_output(_UpperCAmelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
snake_case__ =model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase )
snake_case__ =model(_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
snake_case__ =model(
pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
def _lowercase ( self ) -> Dict:
snake_case__ =MaskFormerModelTester(self )
snake_case__ =ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def _lowercase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowercase ( self ) -> Dict:
snake_case__ , snake_case__ =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
def _lowercase ( self ) -> str:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_UpperCAmelCase )
@unittest.skip(reason='MaskFormer does not use inputs_embeds' )
def _lowercase ( self ) -> int:
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' )
def _lowercase ( self ) -> Dict:
pass
@unittest.skip(reason='MaskFormer is not a generative model' )
def _lowercase ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason='MaskFormer does not use token embeddings' )
def _lowercase ( self ) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _lowercase ( self ) -> Union[str, Any]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _lowercase ( self ) -> List[Any]:
pass
def _lowercase ( self ) -> Dict:
snake_case__ , snake_case__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ =model_class(_UpperCAmelCase )
snake_case__ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ =[*signature.parameters.keys()]
snake_case__ =['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
@slow
def _lowercase ( self ) -> Tuple:
for model_name in ["facebook/maskformer-swin-small-coco"]:
snake_case__ =MaskFormerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def _lowercase ( self ) -> Tuple:
snake_case__ =(self.model_tester.min_size,) * 2
snake_case__ ={
'pixel_values': torch.randn((2, 3, *size) , device=_UpperCAmelCase ),
'mask_labels': torch.randn((2, 10, *size) , device=_UpperCAmelCase ),
'class_labels': torch.zeros(2 , 10 , device=_UpperCAmelCase ).long(),
}
snake_case__ =MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_UpperCAmelCase )
snake_case__ =model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
def _lowercase ( self ) -> Tuple:
snake_case__ , snake_case__ =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
def _lowercase ( self ) -> List[Any]:
snake_case__ , snake_case__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ =model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
snake_case__ =model(**_UpperCAmelCase , output_attentions=_UpperCAmelCase )
self.assertTrue(outputs.attentions is not None )
def _lowercase ( self ) -> Union[str, Any]:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
snake_case__ =self.all_model_classes[1]
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ =self.model_tester.prepare_config_and_inputs()
snake_case__ =model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
snake_case__ =model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ).loss
loss.backward()
def _lowercase ( self ) -> List[Any]:
# only MaskFormerForInstanceSegmentation has the loss
snake_case__ =self.all_model_classes[1]
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ =self.model_tester.prepare_config_and_inputs()
snake_case__ =True
snake_case__ =True
snake_case__ =model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
snake_case__ =model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase )
snake_case__ =outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
snake_case__ =outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
snake_case__ =outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
snake_case__ =outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_UpperCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class a__( unittest.TestCase ):
@cached_property
def _lowercase ( self ) -> Optional[int]:
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' )
if is_vision_available()
else None
)
def _lowercase ( self ) -> Tuple:
snake_case__ =MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(_UpperCAmelCase )
snake_case__ =self.default_image_processor
snake_case__ =prepare_img()
snake_case__ =image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
snake_case__ =inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
snake_case__ =model(**_UpperCAmelCase )
snake_case__ =torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
snake_case__ =torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
snake_case__ =torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def _lowercase ( self ) -> Tuple:
snake_case__ =(
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(_UpperCAmelCase )
.eval()
)
snake_case__ =self.default_image_processor
snake_case__ =prepare_img()
snake_case__ =image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
snake_case__ =inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
snake_case__ =model(**_UpperCAmelCase )
# masks_queries_logits
snake_case__ =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
snake_case__ =[
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
snake_case__ =torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
# class_queries_logits
snake_case__ =outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
snake_case__ =torch.tensor(
[
[1.6_512E00, -5.2_572E00, -3.3_519E00],
[3.6_169E-02, -5.9_025E00, -2.9_313E00],
[1.0_766E-04, -7.7_630E00, -5.1_263E00],
] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def _lowercase ( self ) -> List[Any]:
snake_case__ =(
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' )
.to(_UpperCAmelCase )
.eval()
)
snake_case__ =self.default_image_processor
snake_case__ =prepare_img()
snake_case__ =image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
snake_case__ =inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
snake_case__ =model(**_UpperCAmelCase )
# masks_queries_logits
snake_case__ =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
snake_case__ =[[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
snake_case__ =torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
# class_queries_logits
snake_case__ =outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
snake_case__ =torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def _lowercase ( self ) -> Tuple:
snake_case__ =(
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(_UpperCAmelCase )
.eval()
)
snake_case__ =self.default_image_processor
snake_case__ =image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
snake_case__ =inputs['pixel_values'].to(_UpperCAmelCase )
snake_case__ =[el.to(_UpperCAmelCase ) for el in inputs['mask_labels']]
snake_case__ =[el.to(_UpperCAmelCase ) for el in inputs['class_labels']]
with torch.no_grad():
snake_case__ =model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
| 581
|
'''simple docstring'''
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"
@property
def _lowercase ( self ) -> int:
snake_case__ =4
snake_case__ =8
snake_case__ =7
snake_case__ =floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
snake_case__ =floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
snake_case__ =floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _lowercase ( self , _UpperCAmelCase=0 ) -> List[str]:
torch.manual_seed(_UpperCAmelCase )
snake_case__ =4
snake_case__ =8
snake_case__ =7
snake_case__ =torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
snake_case__ =torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
snake_case__ =torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def _lowercase ( self ) -> str:
return (4, 8)
@property
def _lowercase ( self ) -> Any:
return (4, 8)
def _lowercase ( self ) -> Dict:
snake_case__ ={
'num_attention_heads': 2,
'attention_head_dim': 4,
'num_layers': 2,
'embedding_dim': 8,
'num_embeddings': 7,
'additional_embeddings': 4,
}
snake_case__ =self.dummy_input
return init_dict, inputs_dict
def _lowercase ( self ) -> List[Any]:
snake_case__ , snake_case__ =PriorTransformer.from_pretrained(
'hf-internal-testing/prior-dummy' , output_loading_info=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(_UpperCAmelCase )
snake_case__ =model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def _lowercase ( self ) -> Optional[Any]:
snake_case__ , snake_case__ =self.prepare_init_args_and_inputs_for_common()
snake_case__ =self.model_class(**_UpperCAmelCase )
snake_case__ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ =[*signature.parameters.keys()]
snake_case__ =['hidden_states', 'timestep']
self.assertListEqual(arg_names[:2] , _UpperCAmelCase )
def _lowercase ( self ) -> str:
snake_case__ =PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' )
snake_case__ =model.to(_UpperCAmelCase )
if hasattr(_UpperCAmelCase , 'set_default_attn_processor' ):
model.set_default_attn_processor()
snake_case__ =self.get_dummy_seed_input()
with torch.no_grad():
snake_case__ =model(**_UpperCAmelCase )[0]
snake_case__ =output[0, :5].flatten().cpu()
print(_UpperCAmelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
snake_case__ =torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] )
self.assertTrue(torch_all_close(_UpperCAmelCase , _UpperCAmelCase , rtol=1E-2 ) )
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
def _lowercase ( self , _UpperCAmelCase=1 , _UpperCAmelCase=768 , _UpperCAmelCase=77 , _UpperCAmelCase=0 ) -> Optional[Any]:
torch.manual_seed(_UpperCAmelCase )
snake_case__ =batch_size
snake_case__ =embedding_dim
snake_case__ =num_embeddings
snake_case__ =torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
snake_case__ =torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
snake_case__ =torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _lowercase ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
[37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
# fmt: on
] )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
snake_case__ =PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' )
model.to(_UpperCAmelCase )
snake_case__ =self.get_dummy_seed_input(seed=_UpperCAmelCase )
with torch.no_grad():
snake_case__ =model(**_UpperCAmelCase )[0]
assert list(sample.shape ) == [1, 768]
snake_case__ =sample[0, :8].flatten().cpu()
print(_UpperCAmelCase )
snake_case__ =torch.tensor(_UpperCAmelCase )
assert torch_all_close(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 )
| 581
| 1
|
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Load a vocabulary file into an ordered dictionary mapping token -> index."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer:
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Greedy longest-match-first wordpiece tokenization of a single token."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: jieba segmentation followed by wordpiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
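# Usage sketch (added for illustration; requires the `jieba` backend and downloads the
# checkpoint listed in PRETRAINED_VOCAB_FILES_MAP above):
#
#     tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#     ids = tokenizer.encode("今天天气真好")
#     print(tokenizer.decode(ids))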
| 76
|
"""simple docstring"""
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of *number*, rounded to *digit_amount* digits when positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 76
| 1
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train( self ):
        """simple docstring"""
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args )
        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args )
        entropy_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_args )
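# Hedged sketch (not from the DeeBERT sources): the --early_exit_entropy flag above
# gates inference on the entropy of each highway classifier's softmax output.
def should_exit_early(logits, threshold=0.1):
    """Return True when the softmax over `logits` is confident enough to exit."""
    import numpy as np
    probs = np.exp(logits - np.max(logits))
    probs /= probs.sum()
    entropy = -(probs * np.log(probs + 1e-12)).sum()
    return entropy < threshold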
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class T5Config( PretrainedConfig ):
    '''simple docstring'''
    model_type = """t5"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
    def __init__( self, vocab_size=32128, d_model=512, d_kv=64, d_ff=2048, num_layers=6, num_decoder_layers=None, num_heads=8, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1E-6, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs, ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split("""-""" )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == """gated"""
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
                """'gated-gelu' or 'relu'""" )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = """gelu_new"""
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, )
class T5OnnxConfig( OnnxSeqaSeqConfigWithPast ):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        common_inputs = {
            """input_ids""": {0: """batch""", 1: """encoder_sequence"""},
            """attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
        }
        if self.use_past:
            common_inputs["""attention_mask"""][1] = """past_encoder_sequence + sequence"""
            common_inputs["""decoder_input_ids"""] = {0: """batch"""}
            common_inputs["""decoder_attention_mask"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        else:
            common_inputs["""decoder_input_ids"""] = {0: """batch""", 1: """decoder_sequence"""}
            common_inputs["""decoder_attention_mask"""] = {0: """batch""", 1: """decoder_sequence"""}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="""inputs""" )
        return common_inputs
    @property
    def default_onnx_opset( self ) -> int:
        """simple docstring"""
        return 13
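# Illustrative sketch (not part of the original file): how the config above parses
# `feed_forward_proj` into an activation name and a gating flag.
def _parse_feed_forward_proj(feed_forward_proj: str):
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == "gated"
    if feed_forward_proj == "gated-gelu":  # backwards-compatibility remap, as above
        dense_act_fn = "gelu_new"
    return dense_act_fn, is_gated_act
# _parse_feed_forward_proj("relu")       -> ("relu", False)
# _parse_feed_forward_proj("gated-gelu") -> ("gelu_new", True)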
import os
from collections.abc import Iterator
def good_file_paths ( top_dir: str = "." ) -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir ):
        dir_names[:] = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename )[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path , filename ).lstrip("""./""" )
def md_prefix ( i: int ) -> str:
    return f'{i * "  "}*' if i else "\n##"
def print_path ( old_path: str , new_path: str ) -> str:
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(f'{md_prefix(i )} {new_part.replace("_" , " " ).title()}' )
    return new_path
def print_directory_md ( top_dir: str = "." ) -> None:
    old_path = """"""
    for filepath in sorted(good_file_paths(top_dir ) ):
        filepath , filename = os.path.split(filepath )
        if filepath != old_path:
            old_path = print_path(old_path , filepath )
        indent = (filepath.count(os.sep ) + 1) if filepath else 0
        url = f'{filepath}/{filename}'.replace(""" """ , """%20""" )
        filename = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
        print(f'{md_prefix(indent )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('.')
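# Expected output shape (an illustrative sketch for a hypothetical tree containing
# "sorts/bubble_sort.py"): print_path first emits "\n## Sorts" for the new directory,
# then the file line "  * [Bubble Sort](sorts/bubble_sort.py)".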
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__( self , row : int , column : int , default_value : float = 0 ):
        """simple docstring"""
        self.row , self.column = row , column
        self.array = [[default_value for c in range(column )] for r in range(row )]
    def __str__( self ):
        """simple docstring"""
        s = f'Matrix consist of {self.row} rows and {self.column} columns\n'
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length , len(str(obj ) ) )
        string_format_identifier = f'%{max_element_length}s'
        # Make string and return
        def single_line(row_vector : list[float] ) -> str:
            nonlocal string_format_identifier
            line = """["""
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
            line += "]"
            return line
        s += "\n".join(single_line(row_vector ) for row_vector in self.array )
        return s
def __repr__( self : Any ):
"""simple docstring"""
return str(self )
    def validate_indicies( self , loc : tuple[int, int] ):
        """simple docstring"""
        if not (isinstance(loc , (list, tuple) ) and len(loc ) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__( self , loc : tuple[int, int] ):
        """simple docstring"""
        assert self.validate_indicies(loc )
        return self.array[loc[0]][loc[1]]
    def __setitem__( self , loc : tuple[int, int] , value : float ):
        """simple docstring"""
        assert self.validate_indicies(loc )
        self.array[loc[0]][loc[1]] = value
    def __add__( self , another : Matrix ):
        """simple docstring"""
        assert isinstance(another , Matrix )
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__( self ):
        """simple docstring"""
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = -self[r, c]
        return result
    def __sub__( self , another : Matrix ):
        """simple docstring"""
        return self + (-another)
    def __mul__( self , another : int | float | Matrix ):
        """simple docstring"""
        if isinstance(another , (int, float) ):  # Scalar multiplication
            result = Matrix(self.row , self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another , Matrix ):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row , another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'Unsupported type given for another ({type(another )})'
            raise TypeError(msg )
    def transpose( self ):
        """simple docstring"""
        result = Matrix(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison( self , u : Matrix , v : Matrix ):
        """simple docstring"""
        assert isinstance(u , Matrix ) and isinstance(v , Matrix )
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1( ) -> None:
        # a^(-1)
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(f'a^(-1) is {ainv}' )
        # u, v
        u = Matrix(3 , 1 , 0 )
        u[0, 0] , u[1, 0] , u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0] , v[1, 0] , v[2, 0] = 4, -2, 5
        print(f'u is {u}' )
        print(f'v is {v}' )
        print(f'uv^T is {u * v.transpose()}' )
        # Sherman Morrison
        print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}' )
    def test2( ) -> None:
        import doctest
        doctest.testmod()
    test1()
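    # Numerical cross-check of the Sherman-Morrison update used above (an illustrative
    # sketch with numpy, not part of the original file):
    #   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
    import numpy as np
    a_mat = np.eye(3)
    u_col = np.array([[1.0], [2.0], [-3.0]])
    v_col = np.array([[4.0], [-2.0], [5.0]])
    a_inv = np.linalg.inv(a_mat)
    factor = 1.0 + (v_col.T @ a_inv @ u_col)[0, 0]
    sm_inverse = a_inv - (a_inv @ u_col @ v_col.T @ a_inv) / factor
    assert np.allclose(sm_inverse, np.linalg.inv(a_mat + u_col @ v_col.T))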
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker (role_name ):
    iam_client = boto3.client("""iam""" )
    sagemaker_trust_policy = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name , AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy , indent=2 ) )
        policy_document = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name , PolicyName=f'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(policy_document , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f'''role {role_name} already exists. Using existing one''' )
def _get_iam_role_arn (role_name ):
    iam_client = boto3.client("""iam""" )
    return iam_client.get_role(RoleName=role_name )["Role"]["Arn"]
def get_sagemaker_input ():
    credentials_configuration = _ask_options(
        """How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , int , )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
        os.environ["""AWS_PROFILE"""] = aws_profile
    else:
        print(
            """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
            """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
        aws_access_key_id = _ask_field("""AWS Access Key ID: """ )
        os.environ["""AWS_ACCESS_KEY_ID"""] = aws_access_key_id
        aws_secret_access_key = _ask_field("""AWS Secret Access Key: """ )
        os.environ["""AWS_SECRET_ACCESS_KEY"""] = aws_secret_access_key
    aws_region = _ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
    os.environ["""AWS_DEFAULT_REGION"""] = aws_region
    role_management = _ask_options(
        """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , int , )
    if role_management == 0:
        iam_role_name = _ask_field("""Enter your IAM role name: """ )
    else:
        iam_role_name = """accelerate_sagemaker_execution_role"""
        print(f'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
        _create_iam_role_for_sagemaker(iam_role_name )
    is_custom_docker_image = _ask_field(
        """Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("""Enter your Docker image: """ , lambda x : str(x ).lower() )
    is_sagemaker_inputs_enabled = _ask_field(
        """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda x : str(x ).lower() , )
    is_sagemaker_metrics_enabled = _ask_field(
        """Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda x : str(x ).lower() , )
    distributed_type = _ask_options(
        """What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
    dynamo_config = {}
    use_dynamo = _ask_field(
        """Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
    if use_dynamo:
        prefix = """dynamo_"""
        dynamo_config[prefix + """backend"""] = _ask_options(
            """Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
        use_custom_options = _ask_field(
            """Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
        if use_custom_options:
            dynamo_config[prefix + """mode"""] = _ask_options(
                """Which mode do you want to use?""" , TORCH_DYNAMO_MODES , lambda x : TORCH_DYNAMO_MODES[int(x )] , default="""default""" , )
            dynamo_config[prefix + """use_fullgraph"""] = _ask_field(
                """Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
            dynamo_config[prefix + """use_dynamic"""] = _ask_field(
                """Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
    ec2_instance_query = """Which EC2 instance type you want to use for your training?"""
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query , SAGEMAKER_PARALLEL_EC2_INSTANCES , lambda x : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x )] )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query , lambda x : str(x ).lower() , default="""ml.p3.2xlarge""" )
    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            """How many machines do you want use? [1]: """ , int , default=1 , )
    mixed_precision = _ask_options(
        """Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
    if use_dynamo and mixed_precision == "no":
        print(
            """Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
    return SageMakerConfig(
        image_uri=docker_image , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=distributed_type , use_cpu=False , dynamo_config=dynamo_config , ec2_instance_type=ec2_instance_type , profile=aws_profile , region=aws_region , iam_role_name=iam_role_name , mixed_precision=mixed_precision , num_machines=num_machines , sagemaker_inputs_file=sagemaker_inputs_file , sagemaker_metrics_file=sagemaker_metrics_file , )
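# Sketch of the outcome (hypothetical answers): the function above returns a
# SageMakerConfig such as ec2_instance_type="ml.p3.2xlarge", num_machines=1 and
# mixed_precision="no", which `accelerate config` then writes to its config file.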
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """simple docstring"""
    def __init__( self ):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def circular_convolution( self ):
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
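    # Cross-check of the matrix method above via the FFT (illustrative sketch):
    # circular convolution is elementwise multiplication in the frequency domain,
    # so both routes should give [10, 10, 6, 14] for the default signals.
    a_sig = np.array([2, 1, 2, -1], dtype=float)
    b_sig = np.array([1, 2, 3, 4], dtype=float)
    fft_result = np.real(np.fft.ifft(np.fft.fft(a_sig) * np.fft.fft(b_sig)))
    assert np.allclose(fft_result, [10.0, 10.0, 6.0, 14.0])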
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'spm_file': 'sentencepiece.bpe.model',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
MAX_MODEL_INPUT_SIZES = {
'facebook/s2t-small-librispeech-asr': 1_0_2_4,
}
MUSTC_LANGS = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
LANGUAGES = {'mustc': MUSTC_LANGS}
class Speech2TextTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    prefix_tokens: List[int] = []
    def __init__( self , vocab_file , spm_file , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , do_upper_case=False , do_lower_case=False , tgt_lang=None , lang_codes=None , sp_model_kwargs = None , **kwargs , ):
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , do_upper_case=do_upper_case , do_lower_case=do_lower_case , tgt_lang=tgt_lang , lang_codes=lang_codes , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [F"""<lang:{lang}>""" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
            self.additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.encoder )
    @property
    def tgt_lang( self ):
        """simple docstring"""
        return self._tgt_lang
    @tgt_lang.setter
    def tgt_lang( self , new_tgt_lang ):
        """simple docstring"""
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang )
    def set_tgt_lang_special_tokens( self , tgt_lang ):
        """simple docstring"""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize( self , text ):
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.encoder.get(token , self.encoder[self.unk_token] )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        decoded = self.sp_model.decode(current_sub_tokens )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def get_vocab( self ):
        """simple docstring"""
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        save_dir = Path(save_directory )
        assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
        vocab_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
def load_spm ( path , sp_model_kwargs ):
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json ( path ):
    with open(path , 'r' ) as f:
        return json.load(f )
def save_json ( data , path ):
    with open(path , 'w' ) as f:
        json.dump(data , f , indent=2 )
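# Illustrative sketch (hypothetical ids): with prefix_tokens = [lang_code_id] and an
# appended eos_token_id, build_inputs_with_special_tokens turns [5, 6] into
# [lang_code_id, 5, 6, eos_token_id], and get_special_tokens_mask marks exactly the
# two added positions: [1] + [0, 0] + [1] -> [1, 0, 0, 1].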
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class LxmertConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''lxmert'''
    attribute_map = {}
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_attention_heads=12 , num_qa_labels=9500 , num_object_labels=1600 , num_attr_labels=400 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , l_layers=9 , x_layers=5 , r_layers=5 , visual_feat_dim=2048 , visual_pos_dim=4 , visual_loss_normalizer=6.67 , task_matched=True , task_mask_lm=True , task_obj_predict=True , task_qa=True , visual_obj_loss=True , visual_attr_loss=True , visual_feat_loss=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
        super().__init__(**kwargs )
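# Minimal usage sketch (assuming the class above is exported as LxmertConfig):
#   config = LxmertConfig()
#   config.num_hidden_layers  # {'vision': 5, 'cross_encoder': 5, 'language': 9}
# Defaults mirror the unc-nlp/lxmert-base-uncased checkpoint referenced above.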
'''simple docstring'''
class Graph:
    def __init__( self ):
        self.vertex = {}
    def print_graph( self ):
        print(self.vertex )
        for i in self.vertex:
            print(i , " -> " , " -> ".join([str(j ) for j in self.vertex[i]] ) )
    def add_edge( self , from_vertex , to_vertex ):
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex )
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
    def dfs( self ):
        visited = [False] * len(self.vertex )
        # call the recursive helper function
        for i in range(len(self.vertex ) ):
            if not visited[i]:
                self.dfs_recursive(i , visited )
    def dfs_recursive( self , start_vertex , visited ):
        visited[start_vertex] = True
        print(start_vertex , end=" " )
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i , visited )
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('''DFS:''')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
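# An equivalent iterative traversal (a sketch, not part of the original file): an
# explicit stack replaces the recursion in dfs_recursive above.
def dfs_iterative(graph, start):
    visited, stack, order = set(), [start], []
    while stack:
        vertex = stack.pop()
        if vertex not in visited:
            visited.add(vertex)
            order.append(vertex)
            # push neighbours in reverse so they pop in insertion order
            stack.extend(reversed(graph.get(vertex, [])))
    return order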
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
'''
_KWARGS_DESCRIPTION = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def normalize_answer ( s ):
    def remove_articles(text ):
        regex = re.compile(r'\b(a|an|the)\b' , re.UNICODE )
        return re.sub(regex , ' ' , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def compute_exact ( a_gold , a_pred ):
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_em ( predictions , references ):
    scores = [any(compute_exact(ref , pred ) for ref in refs ) for pred, refs in zip(predictions , references )]
    return (sum(scores ) / len(scores )) * 100
def SARIngram ( sgrams , cgrams , rgramslist , numref ):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall )
    sgramcounter = Counter(sgrams )
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref
    cgramcounter = Counter(cgrams )
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref
    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter
    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep ) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep )
    if len(keepgramcounterall_rep ) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values() )
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep ) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep )
    # ADDITION
    addgramcounter = set(cgramcounter ) - set(sgramcounter )
    addgramcountergood = set(addgramcounter ) & set(rgramcounter )
    addgramcounterall = set(rgramcounter ) - set(sgramcounter )
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter ) > 0:
        addscore_precision = addtmpscore / len(addgramcounter )
    if len(addgramcounterall ) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall )
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
    return (keepscore, delscore_precision, addscore)
def SARIsent ( ssent , csent , rsents ):
    numref = len(rsents )
    s1grams = ssent.split(' ' )
    c1grams = csent.split(' ' )
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(' ' )
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams )
        for i in range(0 , len(r1grams ) - 1 ):
            if i < len(r1grams ) - 1:
                r2gram = r1grams[i] + ' ' + r1grams[i + 1]
                r2grams.append(r2gram )
            if i < len(r1grams ) - 2:
                r3gram = r1grams[i] + ' ' + r1grams[i + 1] + ' ' + r1grams[i + 2]
                r3grams.append(r3gram )
            if i < len(r1grams ) - 3:
                r4gram = r1grams[i] + ' ' + r1grams[i + 1] + ' ' + r1grams[i + 2] + ' ' + r1grams[i + 3]
                r4grams.append(r4gram )
        r2gramslist.append(r2grams )
        r3gramslist.append(r3grams )
        r4gramslist.append(r4grams )
    for i in range(0 , len(s1grams ) - 1 ):
        if i < len(s1grams ) - 1:
            s2gram = s1grams[i] + ' ' + s1grams[i + 1]
            s2grams.append(s2gram )
        if i < len(s1grams ) - 2:
            s3gram = s1grams[i] + ' ' + s1grams[i + 1] + ' ' + s1grams[i + 2]
            s3grams.append(s3gram )
        if i < len(s1grams ) - 3:
            s4gram = s1grams[i] + ' ' + s1grams[i + 1] + ' ' + s1grams[i + 2] + ' ' + s1grams[i + 3]
            s4grams.append(s4gram )
    for i in range(0 , len(c1grams ) - 1 ):
        if i < len(c1grams ) - 1:
            c2gram = c1grams[i] + ' ' + c1grams[i + 1]
            c2grams.append(c2gram )
        if i < len(c1grams ) - 2:
            c3gram = c1grams[i] + ' ' + c1grams[i + 1] + ' ' + c1grams[i + 2]
            c3grams.append(c3gram )
        if i < len(c1grams ) - 3:
            c4gram = c1grams[i] + ' ' + c1grams[i + 1] + ' ' + c1grams[i + 2] + ' ' + c1grams[i + 3]
            c4grams.append(c4gram )
    (keep1score , del1score , add1score) = SARIngram(s1grams , c1grams , r1gramslist , numref )
    (keep2score , del2score , add2score) = SARIngram(s2grams , c2grams , r2gramslist , numref )
    (keep3score , del3score , add3score) = SARIngram(s3grams , c3grams , r3gramslist , numref )
    (keep4score , del4score , add4score) = SARIngram(s4grams , c4grams , r4gramslist , numref )
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score] ) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score] ) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score] ) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize ( sentence , lowercase = True , tokenizer = "13a" , return_str = True ):
    # Normalization is requried for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__ ).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer )()(sentence )
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence )
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence , return_str=True , escape=False )
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence , return_str=True )
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def compute_sari ( sources , predictions , references ):
    if not (len(sources ) == len(predictions ) == len(references )):
        raise ValueError('Sources length must match predictions and references lengths.' )
    sari_score = 0
    for src, pred, refs in zip(sources , predictions , references ):
        sari_score += SARIsent(normalize(src ) , normalize(pred ) , [normalize(sent ) for sent in refs] )
    sari_score = sari_score / len(predictions )
    return 100 * sari_score
def compute_sacrebleu ( predictions , references , smooth_method="exp" , smooth_value=None , force=False , lowercase=False , use_effective_order=False , ):
    references_per_prediction = len(references[0] )
    if any(len(refs ) != references_per_prediction for refs in references ):
        raise ValueError('Sacrebleu requires the same number of references for each prediction' )
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
    output = sacrebleu.corpus_bleu(
        predictions , transformed_references , smooth_method=smooth_method , smooth_value=smooth_value , force=force , lowercase=lowercase , use_effective_order=use_effective_order , )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WikiSplit( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=[
'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py',
'https://github.com/cocoxu/simplification/blob/master/SARI.py',
'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py',
'https://github.com/mjpost/sacreBLEU',
] , reference_urls=[
'https://www.aclweb.org/anthology/Q16-1029.pdf',
'https://github.com/mjpost/sacreBLEU',
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute( self , sources , predictions , references ):
        result = {}
        result.update({'sari': compute_sari(sources=sources , predictions=predictions , references=references )} )
        result.update({'sacrebleu': compute_sacrebleu(predictions=predictions , references=references )} )
        result.update({'exact': compute_em(predictions=predictions , references=references )} )
        return result
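# Usage sketch, mirroring the docstring example above: calling compute_sari with
# sources=["About 95 species are currently accepted ."],
# predictions=["About 95 you now get in ."] and
# references=[["About 95 species are currently known ."]] returns roughly 21.8,
# the `sari` value shown in the example output.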
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_rembert''': ['''REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RemBertConfig''', '''RemBertOnnxConfig''']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_rembert'''] = ['''RemBertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_rembert_fast'''] = ['''RemBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_rembert'''] = [
'''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RemBertForCausalLM''',
'''RemBertForMaskedLM''',
'''RemBertForMultipleChoice''',
'''RemBertForQuestionAnswering''',
'''RemBertForSequenceClassification''',
'''RemBertForTokenClassification''',
'''RemBertLayer''',
'''RemBertModel''',
'''RemBertPreTrainedModel''',
'''load_tf_weights_in_rembert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_rembert'''] = [
'''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRemBertForCausalLM''',
'''TFRemBertForMaskedLM''',
'''TFRemBertForMultipleChoice''',
'''TFRemBertForQuestionAnswering''',
'''TFRemBertForSequenceClassification''',
'''TFRemBertForTokenClassification''',
'''TFRemBertLayer''',
'''TFRemBertModel''',
'''TFRemBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
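# Usage note (a sketch of the lazy-import pattern above): with the _LazyModule
# registration, `from transformers.models.rembert import RemBertModel` defers the
# torch-dependent import until first attribute access, assuming torch is installed.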
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict ( flax_model , pytorch_checkpoint_path , is_sharded , allow_missing_keys=False ):
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            """Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"""
            """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
            """ instructions.""" )
        raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path )
        logger.info(F"""Loading PyTorch weights from {pt_path}""" )
        pt_state_dict = torch.load(pt_path , map_location="""cpu""" )
        logger.info(F"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path , flax_model )
    return flax_state_dict
def rename_key_and_reshape_tensor ( pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix , ) -> (Tuple[str], np.ndarray):
    def is_key_or_prefix_key_in_dict(key ) -> bool:
        return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""scale""",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""mean""",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""var""",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""embedding""",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""kernel""",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""kernel""",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""weight""",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""bias""",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + """_g"""
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + """_v"""
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
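# Shape convention note (illustrative, based on the rules above): PyTorch conv weights
# are (out_channels, in_channels, kH, kW) while Flax expects (kH, kW, in, out),
# hence transpose(2, 3, 1, 0); PyTorch linear weights (out, in) become Flax kernels
# (in, out) via a plain transpose.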
def convert_pytorch_state_dict_to_flax ( pt_state_dict , flax_model ) -> Dict:
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    model_prefix = flax_model.base_model_prefix
    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["""params"""]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params )
    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["""batch_stats"""] )
        random_flax_state_dict.update(flax_batch_stats )
    flax_state_dict = {}
    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
    )
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split(""".""" ) )
        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]
        # Correctly rename weight parameters
        flax_key , flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix )
        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("""batch_stats""",) + flax_key] = jnp.asarray(flax_tensor )
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key , None )
                continue
            # also add unexpected weight so that warning is thrown
            flax_state_dict[("""params""",) + flax_key] = jnp.asarray(flax_tensor )
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
def convert_pytorch_sharded_state_dict_to_flax ( shard_filenames , flax_model ):
    import torch
    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file )
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
        model_prefix = flax_model.base_model_prefix
        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["""params"""]
            random_flax_state_dict = flatten_dict(flax_model_params )
            random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params )
        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split(""".""" ) )
            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]
            # Correctly rename weight parameters
            flax_key , flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key
            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                        F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("""batch_stats""",) + flax_key] = jnp.asarray(flax_tensor )
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("""batch_stats""",) + flax_key] = jnp.asarray(flax_tensor )
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key , None )
                    continue
                # also add unexpected weight so that warning is thrown
                flax_state_dict[("""params""",) + flax_key] = jnp.asarray(flax_tensor )
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""

    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
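
# A minimal usage sketch (checkpoint name illustrative, not a real repo): in
# practice these helpers are driven by `from_pretrained(..., from_flax=True)`,
# which converts a Flax checkpoint into a PyTorch model in one call.
#
#     from transformers import BertModel
#     pt_model = BertModel.from_pretrained("some-org/flax-only-bert", from_flax=True)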
| 182
| 0
|
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely sequence of hidden states for the observations."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
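
# A worked example (the classic toy HMM from the Wikipedia Viterbi article;
# values illustrative):
#
#     observations = ["normal", "cold", "dizzy"]
#     states = ["Healthy", "Fever"]
#     initial = {"Healthy": 0.6, "Fever": 0.4}
#     transition = {
#         "Healthy": {"Healthy": 0.7, "Fever": 0.3},
#         "Fever": {"Healthy": 0.4, "Fever": 0.6},
#     }
#     emission = {
#         "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#         "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
#     }
#     viterbi(observations, states, initial, transition, emission)
#     # -> ['Healthy', 'Healthy', 'Fever']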
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
from doctest import testmod
testmod()
| 135
|
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
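
# Worked example of the `number &= number - 1` step, which clears the lowest
# set bit on each iteration: 25 = 0b11001 -> 24 = 0b11000 -> 16 = 0b10000 -> 0,
# i.e. three iterations for three set bits.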
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by testing the parity of each bit in turn."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    """Benchmark the two bit-counting implementations on a few inputs."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 135
| 1
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
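
# A minimal usage sketch (values illustrative): a nested dict passed as
# `vision_config` is promoted to a full `GitVisionConfig`, with unspecified
# fields keeping their defaults.
#
#     config = GitConfig(vision_config={"image_size": 384})
#     assert config.vision_config.image_size == 384
#     assert config.vision_config.patch_size == 16  # default retained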
| 109
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
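
# A minimal usage sketch (values illustrative): `quant_mode=True` requests
# integer-only (I-BERT) behaviour, while `force_dequant` can opt specific ops
# back out of quantization.
#
#     config = IBertConfig(quant_mode=True, force_dequant="none")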
| 477
| 0
|
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
              of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the first
              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """Get the span predictions for the extractive Q&A model."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """Find the best non-overlapping answer spans for extractive Q&A."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
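
# A minimal usage sketch (the checkpoint is a real DPR reader; question and
# passage text are illustrative):
#
#     tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded = tokenizer(
#         questions="What is love?",
#         titles="Haddaway",
#         texts="'What Is Love' is a song recorded by the artist Haddaway",
#         return_tensors="pt",
#     )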
| 701
|
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
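
# A minimal usage sketch (illustrative): the default "gated-gelu" projection is
# split into `is_gated_act` and `dense_act_fn`, with the latter remapped to the
# "gelu_new" activation.
#
#     config = UMT5Config()
#     assert config.is_gated_act and config.dense_act_fn == "gelu_new"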
| 237
| 0
|